X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fdebug_sync.rs;h=b31ceacea15852def4203b037d8e36216094f5c4;hb=d629a7edb7241eee7fde9f5ccdf1c481d2d6297b;hp=7ee5ee521bc55e37fe578180c25f8d58e647bf53;hpb=34cdca91baa0187d13969855129073c764f4c895;p=rust-lightning

diff --git a/lightning/src/debug_sync.rs b/lightning/src/debug_sync.rs
index 7ee5ee52..b31ceace 100644
--- a/lightning/src/debug_sync.rs
+++ b/lightning/src/debug_sync.rs
@@ -43,30 +43,90 @@ impl Condvar {
 }
 
 thread_local! {
-	/// We track the set of locks currently held by a reference to their `MutexMetadata`
-	static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new());
+	/// We track the set of locks currently held by a reference to their `LockMetadata`
+	static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
 }
-static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
 
-/// Metadata about a single mutex, by id, the set of things locked-before it, and the backtrace of
+/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
-struct MutexMetadata {
-	mutex_idx: u64,
-	locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
+struct LockMetadata {
+	lock_idx: u64,
+	locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
 	#[cfg(feature = "backtrace")]
-	mutex_construction_bt: Backtrace,
+	lock_construction_bt: Backtrace,
 }
-impl PartialEq for MutexMetadata {
-	fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+impl PartialEq for LockMetadata {
+	fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
 }
-impl Eq for MutexMetadata {}
-impl std::hash::Hash for MutexMetadata {
-	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+impl Eq for LockMetadata {}
+impl std::hash::Hash for LockMetadata {
+	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
+}
+
+impl LockMetadata {
+	fn new() -> LockMetadata {
+		LockMetadata {
+			locked_before: StdMutex::new(HashSet::new()),
+			lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
+			#[cfg(feature = "backtrace")]
+			lock_construction_bt: Backtrace::new(),
+		}
+	}
+
+	// Returns whether we were a recursive lock (only relevant for read)
+	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
+		let mut inserted = false;
+		LOCKS_HELD.with(|held| {
+			// For each lock which is currently locked, check that no lock's locked-before
+			// set includes the lock we're about to lock, which would imply a lockorder
+			// inversion.
+			for locked in held.borrow().iter() {
+				if read && *locked == *this {
+					// Recursive read locks are explicitly allowed
+					return;
+				}
+			}
+			for locked in held.borrow().iter() {
+				if !read && *locked == *this {
+					panic!("Tried to lock a lock while it was held!");
+				}
+				for locked_dep in locked.locked_before.lock().unwrap().iter() {
+					if *locked_dep == *this {
+						#[cfg(feature = "backtrace")]
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+						#[cfg(not(feature = "backtrace"))]
+						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+					}
+				}
+				// Insert any already-held locks in our locked-before set.
+				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+			}
+			held.borrow_mut().insert(Arc::clone(this));
+			inserted = true;
+		});
+		inserted
+	}
+
+	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
+	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+
+	fn try_locked(this: &Arc<LockMetadata>) {
+		LOCKS_HELD.with(|held| {
+			// Since a try-lock will simply fail if the lock is held already, we do not
+			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
+			// succeeds, we do consider it to have created lockorder dependencies.
+			for locked in held.borrow().iter() {
+				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+			}
+			held.borrow_mut().insert(Arc::clone(this));
+		});
+	}
 }
 
 pub struct Mutex<T: Sized> {
 	inner: StdMutex<T>,
-	deps: Arc<MutexMetadata>,
+	deps: Arc<LockMetadata>,
 }
 
 #[must_use = "if unused the Mutex will immediately unlock"]
@@ -88,7 +148,7 @@ impl<'a, T: Sized> MutexGuard<'a, T> {
 
 impl<T: Sized> Drop for MutexGuard<'_, T> {
 	fn drop(&mut self) {
-		MUTEXES_HELD.with(|held| {
+		LOCKS_HELD.with(|held| {
 			held.borrow_mut().remove(&self.mutex.deps);
 		});
 	}
@@ -110,104 +170,195 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 
 impl<T> Mutex<T> {
 	pub fn new(inner: T) -> Mutex<T> {
-		Mutex {
-			inner: StdMutex::new(inner),
-			deps: Arc::new(MutexMetadata {
-				locked_before: StdMutex::new(HashSet::new()),
-				mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-				#[cfg(feature = "backtrace")]
-				mutex_construction_bt: Backtrace::new(),
-			}),
-		}
+		Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
 	}
 
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-		MUTEXES_HELD.with(|held| {
-			// For each mutex which is currently locked, check that no mutex's locked-before
-			// set includes the mutex we're about to lock, which would imply a lockorder
-			// inversion.
-			for locked in held.borrow().iter() {
-				for locked_dep in locked.locked_before.lock().unwrap().iter() {
-					if *locked_dep == self.deps {
-						#[cfg(feature = "backtrace")]
-						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
-						#[cfg(not(feature = "backtrace"))]
-						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
-					}
-				}
-				// Insert any already-held mutexes in our locked-before set.
-				self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-			}
-			held.borrow_mut().insert(Arc::clone(&self.deps));
-		});
+		LockMetadata::pre_lock(&self.deps);
 		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
 	}
 
 	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
 		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
 		if res.is_ok() {
-			MUTEXES_HELD.with(|held| {
-				// Since a try-lock will simply fail if the lock is held already, we do not
-				// consider try-locks to ever generate lockorder inversions. However, if a try-lock
-				// succeeds, we do consider it to have created lockorder dependencies.
-				for locked in held.borrow().iter() {
-					self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-				}
-				held.borrow_mut().insert(Arc::clone(&self.deps));
-			});
+			LockMetadata::try_locked(&self.deps);
 		}
 		res
 	}
 }
 
-pub struct RwLock<T: ?Sized> {
-	inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+	inner: StdRwLock<T>,
+	deps: Arc<LockMetadata>,
 }
 
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockReadGuard<'a, T>,
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
+	first_lock: bool,
+	guard: StdRwLockReadGuard<'a, T>,
 }
 
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockWriteGuard<'a, T>,
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
+	guard: StdRwLockWriteGuard<'a, T>,
 }
 
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
 	}
 }
 
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+	fn drop(&mut self) {
+		if !self.first_lock {
+			// Note that it's not strictly true that the first taken read lock will get unlocked
+			// last, but in practice our locks are always taken as RAII, so it should basically
+			// always be true.
+			return;
+		}
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps);
+		});
+	}
+}
+
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
+	}
+}
+
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+	fn drop(&mut self) {
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps);
+		});
 	}
 }
 
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 	fn deref_mut(&mut self) -> &mut T {
-		self.lock.deref_mut()
+		self.guard.deref_mut()
 	}
 }
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner) }
+		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+		let first_lock = LockMetadata::pre_read_lock(&self.deps);
+		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
 	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		LockMetadata::pre_lock(&self.deps);
+		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
 	}
 
 	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
+		if res.is_ok() {
+			LockMetadata::try_locked(&self.deps);
+		}
+		res
 	}
 }
+
+#[test]
+#[should_panic]
+fn recursive_lock_fail() {
+	let mutex = Mutex::new(());
+	let _a = mutex.lock().unwrap();
+	let _b = mutex.lock().unwrap();
+}
+
+#[test]
+fn recursive_read() {
+	let lock = RwLock::new(());
+	let _a = lock.read().unwrap();
+	let _b = lock.read().unwrap();
+}
+
+#[test]
+#[should_panic]
+fn lockorder_fail() {
+	let a = Mutex::new(());
+	let b = Mutex::new(());
+	{
+		let _a = a.lock().unwrap();
+		let _b = b.lock().unwrap();
+	}
+	{
+		let _b = b.lock().unwrap();
+		let _a = a.lock().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.write().unwrap();
+	}
+	{
+		let _b = b.write().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+fn read_recursive_no_lockorder() {
+	// Like the above, but note that no lockorder is implied when we recursively read-lock a
+	// RwLock, causing this to pass just fine.
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	let _outer = a.read().unwrap();
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
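
For illustration only (not part of the patch): a minimal sketch of how the lockorder tracking above behaves from a caller's point of view, mirroring the lockorder_fail test. The module path in the use statement is an assumption for the sketch; in-tree the wrappers are used in place of std's Mutex/RwLock in test builds.

// Hypothetical usage sketch -- assumes the debug_sync Mutex defined in the
// patch above is importable; the path below is illustrative, not from the patch.
use lightning::debug_sync::Mutex;

fn main() {
	let a = Mutex::new(0u32);
	let b = Mutex::new(0u32);

	// Taking `a` then `b` makes LockMetadata::_pre_lock record `a` in `b`'s
	// locked-before set.
	{
		let _a = a.lock().unwrap();
		let _b = b.lock().unwrap();
	}

	// Taking the locks in the opposite order would now panic: while `b` is
	// held, pre_lock on `a` finds `a` in `b`'s locked-before set and reports
	// the lockorder inversion (with the lock's construction backtrace if the
	// `backtrace` feature is enabled). Commented out so the sketch runs.
	// let _b = b.lock().unwrap();
	// let _a = a.lock().unwrap(); // panics: "Tried to violate existing lockorder."
}

The RwLock wrapper feeds the same LockMetadata machinery: write() behaves like lock(), while read() uses pre_read_lock so that recursive read locks are exempted, as exercised by read_recursive_no_lockorder above.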