From: Matt Corallo
Date: Tue, 7 Feb 2023 19:46:08 +0000 (+0000)
Subject: Test if a given mutex is locked by the current thread in tests
X-Git-Tag: v0.0.114-beta~14^2~2
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=6090d9e6a862a2010eb80be56b7449947bc08374;p=rust-lightning

In anticipation of the next commit(s) adding threaded tests, we need to
ensure our lockorder checks work fine with multiple threads. Sadly, we
currently have tests in the form `assert!(mutex.try_lock().is_ok())` to
assert that a given mutex is not already locked by the caller of a
function.

The fix is rather simple given we already track the mutexes held by each
thread in our `debug_sync` logic - simply replace those checks with a new
extension trait which (for test builds) determines the locked state by
looking only at what the current thread itself has locked.
---
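Note (illustration only, not part of the patch): why `try_lock` cannot
express "not held by the current thread" once tests spawn threads. A
minimal sketch using plain `std::sync` types rather than this crate's
`debug_sync` wrappers; the sleeps exist only to make the interleaving
deterministic enough to show the point.

use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
	let mtx = Arc::new(Mutex::new(()));
	let mtx2 = Arc::clone(&mtx);

	// A background thread legitimately takes the lock and holds it briefly.
	let handle = thread::spawn(move || {
		let _guard = mtx2.lock().unwrap();
		thread::sleep(Duration::from_millis(500));
	});
	// Give the background thread time to acquire the lock.
	thread::sleep(Duration::from_millis(100));

	// This thread does NOT hold `mtx`, yet the old-style assertion
	// `assert!(mtx.try_lock().is_ok())` would panic right here:
	// `try_lock` fails when *any* thread holds the lock, not just the
	// current one.
	assert!(mtx.try_lock().is_err());

	handle.join().unwrap();
}

The `held_by_thread` checks introduced below instead consult per-thread
state, so a lock legitimately held by some other thread no longer trips
the assertion.
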
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a042d2f9..2ced6c94 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -70,7 +70,7 @@ use crate::prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
-use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
@@ -1218,13 +1218,10 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				#[cfg(any(feature = "_test_utils", test))]
-				{
-					// In testing, ensure there are no deadlocks where the lock is already held upon
-					// entering the macro.
-					debug_assert!($self.pending_events.try_lock().is_ok());
-					debug_assert!($self.per_peer_state.try_write().is_ok());
-				}
+				// In testing, ensure there are no deadlocks where the lock is already held upon
+				// entering the macro.
+				debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+				debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 
 				let mut msg_events = Vec::with_capacity(2);
 
@@ -3743,17 +3740,12 @@ where
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-		#[cfg(any(feature = "_test_utils", test))]
-		{
-			// Ensure that the peer state channel storage lock is not held when calling this
-			// function.
-			// This ensures that future code doesn't introduce a lock_order requirement for
-			// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
-			// this function with any `per_peer_state` peer lock aquired would.
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			for (_, peer) in per_peer_state.iter() {
-				debug_assert!(peer.try_lock().is_ok());
-			}
+		// Ensure that no peer state channel storage lock is held when calling this function.
+		// This ensures that future code doesn't introduce a lock-order requirement for
+		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+		// this function with any `per_peer_state` peer lock acquired would.
+		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
 		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index 9f7caa2c..56310937 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -14,6 +14,8 @@ use std::sync::Condvar as StdCondvar;
 
 use crate::prelude::HashMap;
 
+use super::{LockTestExt, LockHeldState};
+
 #[cfg(feature = "backtrace")]
 use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
 
@@ -168,6 +170,18 @@ impl LockMetadata {
 	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
 	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
 
+	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+		let mut res = LockHeldState::NotHeldByThread;
+		LOCKS_HELD.with(|held| {
+			for (locked_idx, _locked) in held.borrow().iter() {
+				if *locked_idx == this.lock_idx {
+					res = LockHeldState::HeldByThread;
+				}
+			}
+		});
+		res
+	}
+
 	fn try_locked(this: &Arc<LockMetadata>) {
 		LOCKS_HELD.with(|held| {
 			// Since a try-lock will simply fail if the lock is held already, we do not
@@ -248,6 +262,13 @@ impl<T> Mutex<T> {
 	}
 }
 
+impl<T> LockTestExt for Mutex<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub struct RwLock<T: Sized> {
 	inner: StdRwLock<T>,
 	deps: Arc<LockMetadata>,
@@ -332,4 +353,11 @@ impl<T> RwLock<T> {
 	}
 }
 
+impl<T> LockTestExt for RwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub type FairRwLock<T> = RwLock<T>;
diff --git a/lightning/src/sync/fairrwlock.rs b/lightning/src/sync/fairrwlock.rs
index 5715a8cf..a9519ac2 100644
--- a/lightning/src/sync/fairrwlock.rs
+++ b/lightning/src/sync/fairrwlock.rs
@@ -1,5 +1,6 @@
 use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
 use std::sync::atomic::{AtomicUsize, Ordering};
+use super::{LockHeldState, LockTestExt};
 
 /// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
 /// Linux with pthreads under the hood, readers trivially and completely starve writers).
@@ -48,3 +49,11 @@ impl<T> FairRwLock<T> {
 		self.lock.try_write()
 	}
 }
+
+impl<T> LockTestExt for FairRwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		// fairrwlock is only built in non-test modes, so we should never support tests.
+		LockHeldState::Unsupported
+	}
+}
diff --git a/lightning/src/sync/mod.rs b/lightning/src/sync/mod.rs
index 5b0cc9c1..50ef40e2 100644
--- a/lightning/src/sync/mod.rs
+++ b/lightning/src/sync/mod.rs
@@ -1,3 +1,16 @@
+#[allow(dead_code)] // Depending on the compilation flags some variants are never used
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum LockHeldState {
+	HeldByThread,
+	NotHeldByThread,
+	#[cfg(any(feature = "_bench_unstable", not(test)))]
+	Unsupported,
+}
+
+pub(crate) trait LockTestExt {
+	fn held_by_thread(&self) -> LockHeldState;
+}
+
 #[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
 mod debug_sync;
 #[cfg(all(feature = "std", not(feature = "_bench_unstable"), test))]
@@ -11,6 +24,19 @@ pub(crate) mod fairrwlock;
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
 pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
 
+#[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
+mod ext_impl {
+	use super::*;
+	impl<T> LockTestExt for Mutex<T> {
+		#[inline]
+		fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+	}
+	impl<T> LockTestExt for RwLock<T> {
+		#[inline]
+		fn held_by_thread(&self) -> LockHeldState { LockHeldState::Unsupported }
+	}
+}
+
 #[cfg(not(feature = "std"))]
 mod nostd_sync;
 #[cfg(not(feature = "std"))]
diff --git a/lightning/src/sync/nostd_sync.rs b/lightning/src/sync/nostd_sync.rs
index caf88a7c..e17aa6ab 100644
--- a/lightning/src/sync/nostd_sync.rs
+++ b/lightning/src/sync/nostd_sync.rs
@@ -2,6 +2,7 @@ pub use ::alloc::sync::Arc;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 use core::cell::{RefCell, Ref, RefMut};
+use super::{LockTestExt, LockHeldState};
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -61,6 +62,14 @@ impl<T> Mutex<T> {
 	}
 }
 
+impl<T> LockTestExt for Mutex<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		if self.lock().is_err() { return LockHeldState::HeldByThread; }
+		else { return LockHeldState::NotHeldByThread; }
+	}
+}
+
 pub struct RwLock<T: ?Sized> {
 	inner: RefCell<T>
 }
@@ -116,4 +125,12 @@ impl<T> RwLock<T> {
 	}
 }
 
+impl<T> LockTestExt for RwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		if self.write().is_err() { return LockHeldState::HeldByThread; }
+		else { return LockHeldState::NotHeldByThread; }
+	}
+}
+
 pub type FairRwLock<T> = RwLock<T>;
diff --git a/lightning/src/sync/test_lockorder_checks.rs b/lightning/src/sync/test_lockorder_checks.rs
index f9f30e2c..a3f746b1 100644
--- a/lightning/src/sync/test_lockorder_checks.rs
+++ b/lightning/src/sync/test_lockorder_checks.rs
@@ -1,5 +1,10 @@
 use crate::sync::debug_sync::{RwLock, Mutex};
 
+use super::{LockHeldState, LockTestExt};
+
+use std::sync::Arc;
+use std::thread;
+
 #[test]
 #[should_panic]
 #[cfg(not(feature = "backtrace"))]
@@ -92,3 +97,22 @@ fn read_write_lockorder_fail() {
 		let _a = a.write().unwrap();
 	}
 }
+
+#[test]
+fn test_thread_locked_state() {
+	let mtx = Arc::new(Mutex::new(()));
+	let mtx_ref = Arc::clone(&mtx);
+	assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
+
+	let lck = mtx.lock().unwrap();
+	assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
+
+	let thrd = std::thread::spawn(move || {
+		assert_eq!(mtx_ref.held_by_thread(), LockHeldState::NotHeldByThread);
+	});
+	thrd.join().unwrap();
+	assert_eq!(mtx.held_by_thread(), LockHeldState::HeldByThread);
+
+	std::mem::drop(lck);
+	assert_eq!(mtx.held_by_thread(), LockHeldState::NotHeldByThread);
+}
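
Note (illustration only, not part of the patch): the per-thread answer in
`debug_sync` comes from the `LOCKS_HELD` thread-local it already maintains
for lockorder checking. A simplified standalone sketch of that bookkeeping
follows; the names `note_locked`/`note_unlocked`/`held_by_current_thread`
and the bare `u64` keys are illustrative only, where the real code tracks
`Arc<LockMetadata>` entries keyed by `lock_idx`.

use std::cell::RefCell;
use std::collections::HashSet;

thread_local! {
	// Lock identifiers currently held by *this* thread only.
	static LOCKS_HELD: RefCell<HashSet<u64>> = RefCell::new(HashSet::new());
}

// Called when this thread acquires the lock with identifier `lock_idx`.
fn note_locked(lock_idx: u64) {
	LOCKS_HELD.with(|held| { held.borrow_mut().insert(lock_idx); });
}

// Called when this thread releases the lock with identifier `lock_idx`.
fn note_unlocked(lock_idx: u64) {
	LOCKS_HELD.with(|held| { held.borrow_mut().remove(&lock_idx); });
}

// True only if the *current* thread holds the given lock; locks held by
// other threads are invisible here, unlike with `try_lock`-based checks.
fn held_by_current_thread(lock_idx: u64) -> bool {
	LOCKS_HELD.with(|held| held.borrow().contains(&lock_idx))
}

Because non-test builds carry none of this bookkeeping, their `LockTestExt`
impls simply return `LockHeldState::Unsupported`, as in the `mod.rs` and
`fairrwlock.rs` hunks above.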