From d86e07327c0930bb82f7dadd2d2bcfc15e94205d Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 13 Jan 2022 01:00:43 +0000
Subject: [PATCH] Check lockorders in tests with a trivial lockorder tracker

---
 .github/workflows/build.yml |   4 +
 lightning/Cargo.toml        |   1 +
 lightning/src/debug_sync.rs | 213 ++++++++++++++++++++++++++++++++++++
 lightning/src/lib.rs        |   8 ++
 4 files changed, 226 insertions(+)
 create mode 100644 lightning/src/debug_sync.rs

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 96c62922..e2deced0 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -101,6 +101,10 @@ jobs:
           RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client
           RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client
           RUSTFLAGS="-C link-dead-code" cargo build --verbose --color always --features rpc-client,rest-client,tokio
+      - name: Test backtrace-debug builds on Rust ${{ matrix.toolchain }}
+        if: "matrix.build-no-std"
+        run: |
+          cd lightning && cargo test --verbose --color always --features backtrace
       - name: Test on Rust ${{ matrix.toolchain }} with net-tokio
         if: "matrix.build-net-tokio && !matrix.coverage"
         run: cargo test --verbose --color always
diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml
index 1234cd5c..1dbca464 100644
--- a/lightning/Cargo.toml
+++ b/lightning/Cargo.toml
@@ -39,6 +39,7 @@ secp256k1 = { version = "0.20.2", default-features = false, features = ["alloc"]
 hashbrown = { version = "0.11", optional = true }
 hex = { version = "0.3", optional = true }
 regex = { version = "0.1.80", optional = true }
+backtrace = { version = "0.3", optional = true }
 
 core2 = { version = "0.3.0", optional = true, default-features = false }
diff --git a/lightning/src/debug_sync.rs b/lightning/src/debug_sync.rs
new file mode 100644
index 00000000..7ee5ee52
--- /dev/null
+++ b/lightning/src/debug_sync.rs
@@ -0,0 +1,213 @@
+pub use ::alloc::sync::Arc;
+use core::ops::{Deref, DerefMut};
+use core::time::Duration;
+
+use std::collections::HashSet;
+use std::cell::RefCell;
+
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use std::sync::Mutex as StdMutex;
+use std::sync::MutexGuard as StdMutexGuard;
+use std::sync::RwLock as StdRwLock;
+use std::sync::RwLockReadGuard as StdRwLockReadGuard;
+use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
+use std::sync::Condvar as StdCondvar;
+
+#[cfg(feature = "backtrace")]
+use backtrace::Backtrace;
+
+pub type LockResult<Guard> = Result<Guard, ()>;
+
+pub struct Condvar {
+	inner: StdCondvar,
+}
+
+impl Condvar {
+	pub fn new() -> Condvar {
+		Condvar { inner: StdCondvar::new() }
+	}
+
+	pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
+		let mutex: &'a Mutex<T> = guard.mutex;
+		self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
+	}
+
+	#[allow(unused)]
+	pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
+		let mutex = guard.mutex;
+		self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
+	}
+
+	pub fn notify_all(&self) { self.inner.notify_all(); }
+}
+
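+// How the tracking works: every `Mutex` owns a shared `MutexMetadata`, and
+// each thread records the metadata of every mutex it currently holds (in
+// MUTEXES_HELD below). Locking mutex B while holding mutex A inserts A into
+// B's locked-before set; a later attempt to lock A while holding B then
+// finds A in B's set and panics, even if the two call sites never actually
+// run concurrently.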
+thread_local! {
+	/// We track the set of locks currently held by a reference to their `MutexMetadata`
+	static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new());
+}
+static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+
+/// Metadata about a single mutex: its index, the set of mutexes locked before it, and the
+/// backtrace of where the Mutex itself was constructed.
+struct MutexMetadata {
+	mutex_idx: u64,
+	locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
+	#[cfg(feature = "backtrace")]
+	mutex_construction_bt: Backtrace,
+}
+impl PartialEq for MutexMetadata {
+	fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+}
+impl Eq for MutexMetadata {}
+impl std::hash::Hash for MutexMetadata {
+	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+}
+
+pub struct Mutex<T: Sized> {
+	inner: StdMutex<T>,
+	deps: Arc<MutexMetadata>,
+}
+
+#[must_use = "if unused the Mutex will immediately unlock"]
+pub struct MutexGuard<'a, T: Sized + 'a> {
+	mutex: &'a Mutex<T>,
+	lock: StdMutexGuard<'a, T>,
+}
+
+impl<'a, T: Sized> MutexGuard<'a, T> {
+	fn into_inner(self) -> StdMutexGuard<'a, T> {
+		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
+		unsafe {
+			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
+			std::mem::forget(self);
+			v
+		}
+	}
+}
+
+impl<T: Sized> Drop for MutexGuard<'_, T> {
+	fn drop(&mut self) {
+		MUTEXES_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps);
+		});
+	}
+}
+
+impl<T: Sized> Deref for MutexGuard<'_, T> {
+	type Target = T;
+
+	fn deref(&self) -> &T {
+		&self.lock.deref()
+	}
+}
+
+impl<T: Sized> DerefMut for MutexGuard<'_, T> {
+	fn deref_mut(&mut self) -> &mut T {
+		self.lock.deref_mut()
+	}
+}
+
+impl<T> Mutex<T> {
+	pub fn new(inner: T) -> Mutex<T> {
+		Mutex {
+			inner: StdMutex::new(inner),
+			deps: Arc::new(MutexMetadata {
+				locked_before: StdMutex::new(HashSet::new()),
+				mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
+				#[cfg(feature = "backtrace")]
+				mutex_construction_bt: Backtrace::new(),
+			}),
+		}
+	}
+
+	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
+		MUTEXES_HELD.with(|held| {
+			// For each mutex which is currently locked, check that no mutex's locked-before
+			// set includes the mutex we're about to lock, which would imply a lockorder
+			// inversion.
+			for locked in held.borrow().iter() {
+				for locked_dep in locked.locked_before.lock().unwrap().iter() {
+					if *locked_dep == self.deps {
+						#[cfg(feature = "backtrace")]
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
+						#[cfg(not(feature = "backtrace"))]
+						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+					}
+				}
+				// Insert any already-held mutexes into our locked-before set.
+				self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
+			}
+			held.borrow_mut().insert(Arc::clone(&self.deps));
+		});
+		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
+	}
+
+	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
+		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
+		if res.is_ok() {
+			MUTEXES_HELD.with(|held| {
+				// Since a try-lock will simply fail if the lock is already held, we do not
+				// consider try-locks to ever generate lockorder inversions. However, if a
+				// try-lock succeeds, we do consider it to have created lockorder dependencies.
+				for locked in held.borrow().iter() {
+					self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
+				}
+				held.borrow_mut().insert(Arc::clone(&self.deps));
+			});
+		}
+		res
+	}
+}
+
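+// Note: the `RwLock` wrapper below is a plain passthrough; read/write locks
+// do not participate in the lockorder tracking above, only `Mutex` does.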
+pub struct RwLock<T: ?Sized> {
+	inner: StdRwLock<T>
+}
+
+pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
+	lock: StdRwLockReadGuard<'a, T>,
+}
+
+pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
+	lock: StdRwLockWriteGuard<'a, T>,
+}
+
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+	type Target = T;
+
+	fn deref(&self) -> &T {
+		&self.lock.deref()
+	}
+}
+
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+	type Target = T;
+
+	fn deref(&self) -> &T {
+		&self.lock.deref()
+	}
+}
+
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+	fn deref_mut(&mut self) -> &mut T {
+		self.lock.deref_mut()
+	}
+}
+
+impl<T> RwLock<T> {
+	pub fn new(inner: T) -> RwLock<T> {
+		RwLock { inner: StdRwLock::new(inner) }
+	}
+
+	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
+		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+	}
+
+	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
+		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+	}
+
+	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
+		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+	}
+}
diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs
index 3338803f..ca18d7bb 100644
--- a/lightning/src/lib.rs
+++ b/lightning/src/lib.rs
@@ -143,8 +143,16 @@ mod prelude {
 	pub use alloc::string::ToString;
 }
 
+#[cfg(all(feature = "std", test))]
+mod debug_sync;
+#[cfg(all(feature = "backtrace", feature = "std", test))]
+extern crate backtrace;
+
 #[cfg(feature = "std")]
 mod sync {
+	#[cfg(test)]
+	pub use debug_sync::*;
+	#[cfg(not(test))]
 	pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard};
 }
-- 
2.30.2
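
For illustration only, not part of the patch above: a minimal sketch of the inversion this tracker catches, assuming the code is compiled inside the `lightning` crate so that `crate::sync::Mutex` resolves to the `debug_sync::Mutex` above under `cfg(test)`. The module and test names here are hypothetical. Locking two mutexes in opposite orders on two sequential code paths panics on the second path, even though no two threads ever race:

#[cfg(test)]
mod lockorder_demo {
	use crate::sync::Mutex;

	#[test]
	#[should_panic(expected = "lockorder")]
	fn detects_inversion_without_a_race() {
		let a = Mutex::new(0u8);
		let b = Mutex::new(0u8);

		// First path: lock `a`, then `b`. `Mutex::lock` records `a` in
		// `b`'s locked-before set; both guards drop at the end of the scope.
		{
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap();
		}

		// Second path: lock `b`, then `a`. While `b` is held, `a.lock()`
		// finds `a` in `b`'s locked-before set and panics with
		// "Tried to violate existing lockorder."
		let _b = b.lock().unwrap();
		let _a = a.lock().unwrap();
	}
}

Because the ordering is recorded per-mutex rather than observed per-race, the second path fails deterministically in a single-threaded test run, which is the point of the tracker.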