X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fsync%2Fdebug_sync.rs;h=aa9f5fe9c17d19ca4ffe778faadd1b128123e6f0;hb=9c08fbd435b097c0aeec2843d8b4a6fdec06a8f0;hp=ac82475f964b4e38faf17463ce9772a4c053d1cd;hpb=230331f3e8efe9cee0ad2dc1644051bd9679a01f;p=rust-lightning

diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index ac82475f..aa9f5fe9 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -14,6 +14,8 @@ use std::sync::Condvar as StdCondvar;
 
 use crate::prelude::HashMap;
 
+use super::{LockTestExt, LockHeldState};
+
 #[cfg(feature = "backtrace")]
 use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
 
@@ -77,7 +79,7 @@ fn get_construction_location(backtrace: &Backtrace) -> String {
 	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
 	// that as the mutex construction site. Note that the first few frames may be in
 	// the `backtrace` crate, so we have to ignore those.
-	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
 	let mut found_debug_sync = false;
 	for frame in backtrace.frames() {
 		for symbol in frame.symbols() {
@@ -122,21 +124,13 @@ impl LockMetadata {
 		res
 	}
 
-	// Returns whether we were a recursive lock (only relevant for read)
-	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
-		let mut inserted = false;
+	fn pre_lock(this: &Arc<LockMetadata>) {
 		LOCKS_HELD.with(|held| {
 			// For each lock which is currently locked, check that no lock's locked-before
 			// set includes the lock we're about to lock, which would imply a lockorder
 			// inversion.
-			for (locked_idx, _locked) in held.borrow().iter() {
-				if read && *locked_idx == this.lock_idx {
-					// Recursive read locks are explicitly allowed
-					return;
-				}
-			}
 			for (locked_idx, locked) in held.borrow().iter() {
-				if !read && *locked_idx == this.lock_idx {
+				if *locked_idx == this.lock_idx {
 					// With `feature = "backtrace"` set, we may be looking at different instances
 					// of the same lock.
 					debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
@@ -160,13 +154,20 @@ impl LockMetadata {
 				}
 			}
 			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
-			inserted = true;
 		});
-		inserted
 	}
 
-	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
-	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+		let mut res = LockHeldState::NotHeldByThread;
+		LOCKS_HELD.with(|held| {
+			for (locked_idx, _locked) in held.borrow().iter() {
+				if *locked_idx == this.lock_idx {
+					res = LockHeldState::HeldByThread;
+				}
+			}
+		});
+		res
+	}
 
 	fn try_locked(this: &Arc<LockMetadata>) {
 		LOCKS_HELD.with(|held| {
@@ -248,6 +249,13 @@ impl<T> Mutex<T> {
 	}
 }
 
+impl<T> LockTestExt for Mutex<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub struct RwLock<T: Sized> {
 	inner: StdRwLock<T>,
 	deps: Arc<LockMetadata>,
@@ -255,7 +263,6 @@ pub struct RwLock<T: Sized> {
 
 pub struct RwLockReadGuard<'a, T: Sized + 'a> {
 	lock: &'a RwLock<T>,
-	first_lock: bool,
 	guard: StdRwLockReadGuard<'a, T>,
 }
 
@@ -274,12 +281,6 @@ impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
 
 impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
 	fn drop(&mut self) {
-		if !self.first_lock {
-			// Note that its not strictly true that the first taken read lock will get unlocked
-			// last, but in practice our locks are always taken as RAII, so it should basically
-			// always be true.
-			return;
-		}
 		LOCKS_HELD.with(|held| {
 			held.borrow_mut().remove(&self.lock.deps.lock_idx);
 		});
@@ -314,8 +315,11 @@ impl<T> RwLock<T> {
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		let first_lock = LockMetadata::pre_read_lock(&self.deps);
-		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
+		// Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
+		// deadlock trying to take a second read lock if another thread is waiting on the write
+		// lock. Its platform dependent (but our in-tree `FairRwLock` guarantees this behavior).
+		LockMetadata::pre_lock(&self.deps);
+		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
@@ -332,4 +336,11 @@ impl<T> RwLock<T> {
 	}
 }
 
+impl<T> LockTestExt for RwLock<T> {
+	#[inline]
+	fn held_by_thread(&self) -> LockHeldState {
+		LockMetadata::held_by_thread(&self.deps)
+	}
+}
+
 pub type FairRwLock<T> = RwLock<T>;
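
For context on the new API surface: the diff drops the recursive-read-lock special case (`pre_read_lock` and the `first_lock` guard field) in favor of a single `pre_lock` path, and exposes each lock's thread-local held-state through the new `LockTestExt::held_by_thread` hook. Below is a minimal test sketch of how that hook behaves; it is not part of this commit, the module and test names are hypothetical, and it assumes placement where the `debug_sync` `Mutex`, `LockTestExt`, and `LockHeldState` are importable from the parent `sync` module.

// Hypothetical sketch, not part of this diff. Assumes a #[cfg(test)] module
// under lightning/src/sync/ where the debug_sync types resolve via `super`.
#[cfg(test)]
mod held_by_thread_sketch {
	use super::{LockHeldState, LockTestExt, Mutex};

	#[test]
	fn mutex_reports_held_state() {
		let mutex = Mutex::new(0u8);
		// No guard is live yet, so the thread-local LOCKS_HELD map has no
		// entry for this lock's index.
		assert!(matches!(mutex.held_by_thread(), LockHeldState::NotHeldByThread));
		let _guard = mutex.lock().unwrap();
		// pre_lock() inserted this lock's index into LOCKS_HELD, so the new
		// held_by_thread() hook now reports it as held by this thread.
		assert!(matches!(mutex.held_by_thread(), LockHeldState::HeldByThread));
	}
}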