From 3648784f816ebdd8d34a43baaef6b7b6ce5202ed Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 9 Mar 2022 06:24:06 +0000
Subject: [PATCH] Implement lockorder checking on RwLocks in debug_sync

---
 lightning/src/debug_sync.rs | 164 +++++++++++++++++++++++++++++++++---
 1 file changed, 152 insertions(+), 12 deletions(-)

diff --git a/lightning/src/debug_sync.rs b/lightning/src/debug_sync.rs
index 3eb342af2..04fc86bbb 100644
--- a/lightning/src/debug_sync.rs
+++ b/lightning/src/debug_sync.rs
@@ -74,12 +74,23 @@ impl MutexMetadata {
 		}
 	}
 
-	fn pre_lock(this: &Arc<MutexMetadata>) {
+	// Returns whether we were a recursive lock (only relevant for read)
+	fn _pre_lock(this: &Arc<MutexMetadata>, read: bool) -> bool {
+		let mut inserted = false;
 		MUTEXES_HELD.with(|held| {
 			// For each mutex which is currently locked, check that no mutex's locked-before
 			// set includes the mutex we're about to lock, which would imply a lockorder
 			// inversion.
 			for locked in held.borrow().iter() {
+				if read && *locked == *this {
+					// Recursive read locks are explicitly allowed
+					return;
+				}
+			}
+			for locked in held.borrow().iter() {
+				if !read && *locked == *this {
+					panic!("Tried to lock a mutex while it was held!");
+				}
 				for locked_dep in locked.locked_before.lock().unwrap().iter() {
 					if *locked_dep == *this {
 						#[cfg(feature = "backtrace")]
@@ -92,9 +103,14 @@ impl MutexMetadata {
 				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
 			}
 			held.borrow_mut().insert(Arc::clone(this));
+			inserted = true;
 		});
+		inserted
 	}
 
+	fn pre_lock(this: &Arc<MutexMetadata>) { Self::_pre_lock(this, false); }
+	fn pre_read_lock(this: &Arc<MutexMetadata>) -> bool { Self::_pre_lock(this, true) }
+
 	fn try_locked(this: &Arc<MutexMetadata>) {
 		MUTEXES_HELD.with(|held| {
 			// Since a try-lock will simply fail if the lock is held already, we do not
@@ -171,19 +187,23 @@ impl<T> Mutex<T> {
 	}
 }
 
-pub struct RwLock<T: ?Sized> {
-	inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+	inner: StdRwLock<T>,
+	deps: Arc<MutexMetadata>,
 }
 
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+	mutex: &'a RwLock<T>,
+	first_lock: bool,
 	lock: StdRwLockReadGuard<'a, T>,
 }
 
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+	mutex: &'a RwLock<T>,
 	lock: StdRwLockWriteGuard<'a, T>,
 }
 
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
@@ -191,7 +211,21 @@ impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
 	}
 }
 
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+	fn drop(&mut self) {
+		if !self.first_lock {
+			// Note that it's not strictly true that the first taken read lock will get unlocked
+			// last, but in practice our locks are always taken as RAII, so it should basically
+			// always be true.
+			return;
+		}
+		MUTEXES_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps);
+		});
+	}
+}
+
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
@@ -199,7 +233,15 @@ impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
 	}
 }
 
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+	fn drop(&mut self) {
+		MUTEXES_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps);
+		});
+	}
+}
+
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 	fn deref_mut(&mut self) -> &mut T {
 		self.lock.deref_mut()
 	}
@@ -207,18 +249,116 @@ impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner) }
+		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(MutexMetadata::new()) }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+		let first_lock = MutexMetadata::pre_read_lock(&self.deps);
+		self.inner.read().map(|lock| RwLockReadGuard { mutex: self, lock, first_lock }).map_err(|_| ())
 	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		MutexMetadata::pre_lock(&self.deps);
+		self.inner.write().map(|lock| RwLockWriteGuard { mutex: self, lock }).map_err(|_| ())
 	}
 
 	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		let res = self.inner.try_write().map(|lock| RwLockWriteGuard { mutex: self, lock }).map_err(|_| ());
+		if res.is_ok() {
+			MutexMetadata::try_locked(&self.deps);
+		}
+		res
 	}
 }
+
+#[test]
+#[should_panic]
+fn recursive_lock_fail() {
+	let mutex = Mutex::new(());
+	let _a = mutex.lock().unwrap();
+	let _b = mutex.lock().unwrap();
+}
+
+#[test]
+fn recursive_read() {
+	let lock = RwLock::new(());
+	let _a = lock.read().unwrap();
+	let _b = lock.read().unwrap();
+}
+
+#[test]
+#[should_panic]
+fn lockorder_fail() {
+	let a = Mutex::new(());
+	let b = Mutex::new(());
+	{
+		let _a = a.lock().unwrap();
+		let _b = b.lock().unwrap();
+	}
+	{
+		let _b = b.lock().unwrap();
+		let _a = a.lock().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.write().unwrap();
+	}
+	{
+		let _b = b.write().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
+
+#[test]
+#[should_panic]
+fn read_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
+}
+
+#[test]
+fn read_recursive_no_lockorder() {
+	// Like the above, but note that no lockorder is implied when we recursively read-lock a
+	// RwLock, causing this to pass just fine.
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	let _outer = a.read().unwrap();
+	{
+		let _a = a.read().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.read().unwrap();
+	}
}
+
+#[test]
+#[should_panic]
+fn read_write_lockorder_fail() {
+	let a = RwLock::new(());
+	let b = RwLock::new(());
+	{
+		let _a = a.write().unwrap();
+		let _b = b.read().unwrap();
+	}
+	{
+		let _b = b.read().unwrap();
+		let _a = a.write().unwrap();
+	}
+}
-- 
2.39.5
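For readers skimming the hunks above: each lock carries a metadata object with a locked_before set of locks that were already held when it was taken, and a thread-local set tracks the locks the current thread currently holds; taking a lock that already appears in a held lock's locked_before set means two call sites disagree on ordering, and the code panics. Below is a minimal standalone sketch of that idea, as an illustration only: DebugMutex, LockMetadata and HELD are made-up names (not LDK's), and the recursive-lock and read-lock special cases handled by the patch are omitted.

use std::cell::RefCell;
use std::collections::HashSet;
use std::ops::Deref;
use std::sync::{Arc, Mutex as StdMutex, MutexGuard as StdMutexGuard};

thread_local! {
	// Locks currently held by this thread.
	static HELD: RefCell<Vec<Arc<LockMetadata>>> = RefCell::new(Vec::new());
}

struct LockMetadata {
	id: u64,
	// ids of locks that were already held when this lock was taken.
	locked_before: StdMutex<HashSet<u64>>,
}

struct DebugMutex<T> {
	deps: Arc<LockMetadata>,
	inner: StdMutex<T>,
}

struct DebugMutexGuard<'a, T> {
	deps: Arc<LockMetadata>,
	guard: StdMutexGuard<'a, T>,
}

impl<T> DebugMutex<T> {
	fn new(id: u64, t: T) -> Self {
		DebugMutex {
			deps: Arc::new(LockMetadata { id, locked_before: StdMutex::new(HashSet::new()) }),
			inner: StdMutex::new(t),
		}
	}

	fn lock(&self) -> DebugMutexGuard<'_, T> {
		HELD.with(|held| {
			for other in held.borrow().iter() {
				// `other` is held while we take `self`. If `other` was ever taken
				// while `self` was held, the two call sites disagree on ordering.
				if other.locked_before.lock().unwrap().contains(&self.deps.id) {
					panic!("lockorder inversion between locks {} and {}", other.id, self.deps.id);
				}
				// Record the order we just observed: `other` before `self`.
				self.deps.locked_before.lock().unwrap().insert(other.id);
			}
			held.borrow_mut().push(Arc::clone(&self.deps));
		});
		DebugMutexGuard { deps: Arc::clone(&self.deps), guard: self.inner.lock().unwrap() }
	}
}

impl<T> Deref for DebugMutexGuard<'_, T> {
	type Target = T;
	fn deref(&self) -> &T { &self.guard }
}

impl<T> Drop for DebugMutexGuard<'_, T> {
	fn drop(&mut self) {
		// The lock is released; it is no longer held by this thread.
		HELD.with(|held| held.borrow_mut().retain(|m| m.id != self.deps.id));
	}
}

fn main() {
	let a = DebugMutex::new(1, 0u8);
	let b = DebugMutex::new(2, 0u8);
	{
		let _a = a.lock();
		let _b = b.lock(); // records "a before b"
	}
	{
		let _b = b.lock();
		let _a = a.lock(); // panics: "b before a" contradicts the recorded order
	}
}

Running the sketch aborts in the second block with the lockorder panic, mirroring what the lockorder_fail test in the patch asserts via #[should_panic].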