X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fsync%2Fdebug_sync.rs;h=5b6acbcadd5bf38686f8ec43dc761eae6a9e3ba1;hb=refs%2Fheads%2F2023-02-no-recursive-read-locks;hp=aa9f5fe9c17d19ca4ffe778faadd1b128123e6f0;hpb=9c08fbd435b097c0aeec2843d8b4a6fdec06a8f0;p=rust-lightning

diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index aa9f5fe9..5b6acbca 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -75,7 +75,7 @@ struct LockDep {
 }
 
 #[cfg(feature = "backtrace")]
-fn get_construction_location(backtrace: &Backtrace) -> String {
+fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
 	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
 	// that as the mutex construction site. Note that the first few frames may be in
 	// the `backtrace` crate, so we have to ignore those.
@@ -86,13 +86,7 @@ fn get_construction_location(backtrace: &Backtrace) -> String {
 		let symbol_name = symbol.name().unwrap().as_str().unwrap();
 		if !sync_mutex_constr_regex.is_match(symbol_name) {
 			if found_debug_sync {
-				if let Some(col) = symbol.colno() {
-					return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
-				} else {
-					// Windows debug symbols don't support column numbers, so fall back to
-					// line numbers only if no `colno` is available
-					return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
-				}
+				return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
 			}
 		} else { found_debug_sync = true; }
 	}
@@ -113,34 +107,51 @@ impl LockMetadata {
 
 		#[cfg(feature = "backtrace")]
 		{
-			let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+			let (lock_constr_location, lock_constr_colno) =
+				get_construction_location(&res._lock_construction_bt);
 			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
 			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
 			match locks.entry(lock_constr_location) {
-				hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+				hash_map::Entry::Occupied(e) => {
+					assert_eq!(lock_constr_colno,
+						get_construction_location(&e.get()._lock_construction_bt).1,
+						"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
+					return Arc::clone(e.get())
+				},
 				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
 			}
 		}
 		res
 	}
 
-	fn pre_lock(this: &Arc<LockMetadata>) {
+	fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
 		LOCKS_HELD.with(|held| {
 			// For each lock which is currently locked, check that no lock's locked-before
 			// set includes the lock we're about to lock, which would imply a lockorder
 			// inversion.
 			for (locked_idx, locked) in held.borrow().iter() {
 				if *locked_idx == this.lock_idx {
-					// With `feature = "backtrace"` set, we may be looking at different instances
-					// of the same lock.
-					debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
+					// Note that with `feature = "backtrace"` set, we may be looking at different
+					// instances of the same lock. Still, doing so is quite risky, a total order
+					// must be maintained, and doing so across a set of otherwise-identical mutexes
+					// is fraught with issues.
+					#[cfg(feature = "backtrace")]
+					debug_assert!(_double_lock_self_allowed,
+						"Tried to acquire a lock while it was held!\nLock constructed at {}",
+						get_construction_location(&this._lock_construction_bt).0);
+					#[cfg(not(feature = "backtrace"))]
+					panic!("Tried to acquire a lock while it was held!");
 				}
+			}
+			for (locked_idx, locked) in held.borrow().iter() {
 				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
 					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
 						#[cfg(feature = "backtrace")]
 						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
-							get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
-							get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
+							get_construction_location(&this._lock_construction_bt).0,
+							this.lock_idx, this._lock_construction_bt,
+							get_construction_location(&locked._lock_construction_bt).0,
+							locked.lock_idx, locked._lock_construction_bt,
 							_locked_dep._lockdep_trace);
 						#[cfg(not(feature = "backtrace"))]
 						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
@@ -236,7 +247,7 @@ impl<T> Mutex<T> {
 	}
 
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-		LockMetadata::pre_lock(&self.deps);
+		LockMetadata::pre_lock(&self.deps, false);
 		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
 	}
 
@@ -249,11 +260,17 @@ impl<T> Mutex<T> {
 	}
 }
 
-impl<T> LockTestExt for Mutex<T> {
+impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
 	#[inline]
 	fn held_by_thread(&self) -> LockHeldState {
 		LockMetadata::held_by_thread(&self.deps)
 	}
+	type ExclLock = MutexGuard<'a, T>;
+	#[inline]
+	fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> {
+		LockMetadata::pre_lock(&self.deps, true);
+		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
+	}
 }
 
 pub struct RwLock<T: Sized> {
@@ -317,13 +334,14 @@ impl<T> RwLock<T> {
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
 		// Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
 		// deadlock trying to take a second read lock if another thread is waiting on the write
-		// lock. Its platform dependent (but our in-tree `FairRwLock` guarantees this behavior).
-		LockMetadata::pre_lock(&self.deps);
+		// lock. This behavior is platform dependent, but our in-tree `FairRwLock` guarantees
+		// such a deadlock.
+		LockMetadata::pre_lock(&self.deps, false);
 		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
 	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		LockMetadata::pre_lock(&self.deps);
+		LockMetadata::pre_lock(&self.deps, false);
 		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
 	}
 
@@ -336,11 +354,17 @@ impl<T> RwLock<T> {
 	}
 }
 
-impl<T> LockTestExt for RwLock<T> {
+impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
 	#[inline]
 	fn held_by_thread(&self) -> LockHeldState {
 		LockMetadata::held_by_thread(&self.deps)
 	}
+	type ExclLock = RwLockWriteGuard<'a, T>;
+	#[inline]
+	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
+		LockMetadata::pre_lock(&self.deps, true);
+		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
+	}
 }
 
 pub type FairRwLock<T> = RwLock<T>;
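
Context note, not part of the patch above: the following is a minimal, hypothetical sketch of the lockorder inversion that `LockMetadata::pre_lock` exists to catch, written against plain `std::sync` types rather than the crate-internal `debug_sync` wrappers. The program and the names `lock_a`/`lock_b` are illustrative only.

// Illustrative sketch, assuming plain std::sync types stand in for the
// test-only `debug_sync` wrappers. With those wrappers, the second thread
// below would panic on the lockorder violation in tests rather than
// silently risking a deadlock in production.
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
	let lock_a = Arc::new(Mutex::new(0u8));
	let lock_b = Arc::new(Mutex::new(0u8));

	// Thread 1 takes A then B; `debug_sync` would record A in B's
	// `locked_before` set when B is locked while A is held.
	let (a1, b1) = (Arc::clone(&lock_a), Arc::clone(&lock_b));
	thread::spawn(move || {
		let _a = a1.lock().unwrap();
		let _b = b1.lock().unwrap();
	}).join().unwrap();

	// Thread 2 takes B then A; while B is held, `pre_lock` would find A in
	// B's `locked_before` set and flag the inversion.
	let (a2, b2) = (Arc::clone(&lock_a), Arc::clone(&lock_b));
	thread::spawn(move || {
		let _b = b2.lock().unwrap();
		let _a = a2.lock().unwrap();
	}).join().unwrap();
}

The `unsafe_well_ordered_double_lock_self` / `_double_lock_self_allowed` path added by this patch covers the other case `pre_lock` flags: deliberately taking two locks that share a construction site (and thus a `lock_idx` under `feature = "backtrace"`), which is only sound when the caller maintains a total order across those otherwise-identical locks.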