X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fdebug_sync.rs;h=b61d1cb55e8cff3143c686c4ad6fbc13427b47d9;hb=080c70f98f8b96bae965ca90ca690f06cead4b3b;hp=7ee5ee521bc55e37fe578180c25f8d58e647bf53;hpb=34cdca91baa0187d13969855129073c764f4c895;p=rust-lightning

diff --git a/lightning/src/debug_sync.rs b/lightning/src/debug_sync.rs
index 7ee5ee52..b61d1cb5 100644
--- a/lightning/src/debug_sync.rs
+++ b/lightning/src/debug_sync.rs
@@ -2,11 +2,9 @@ pub use ::alloc::sync::Arc;
 use core::ops::{Deref, DerefMut};
 use core::time::Duration;
 
-use std::collections::HashSet;
 use std::cell::RefCell;
 
 use std::sync::atomic::{AtomicUsize, Ordering};
-
 use std::sync::Mutex as StdMutex;
 use std::sync::MutexGuard as StdMutexGuard;
 use std::sync::RwLock as StdRwLock;
@@ -14,8 +12,15 @@ use std::sync::RwLockReadGuard as StdRwLockReadGuard;
 use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
 use std::sync::Condvar as StdCondvar;
 
+use crate::prelude::HashMap;
+
 #[cfg(feature = "backtrace")]
-use backtrace::Backtrace;
+use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
+
+#[cfg(not(feature = "backtrace"))]
+struct Backtrace{}
+#[cfg(not(feature = "backtrace"))]
+impl Backtrace { fn new() -> Backtrace { Backtrace {} } }
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -43,30 +48,146 @@ impl Condvar {
 }
 
 thread_local! {
-	/// We track the set of locks currently held by a reference to their `MutexMetadata`
-	static MUTEXES_HELD: RefCell<HashSet<Arc<MutexMetadata>>> = RefCell::new(HashSet::new());
+	/// We track the set of locks currently held by a reference to their `LockMetadata`
+	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
 }
-static MUTEX_IDX: AtomicUsize = AtomicUsize::new(0);
+static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
+
+#[cfg(feature = "backtrace")]
+static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
+#[cfg(feature = "backtrace")]
+static LOCKS_INIT: Once = Once::new();
 
-/// Metadata about a single mutex, by id, the set of things locked-before it, and the backtrace of
+/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
-struct MutexMetadata {
-	mutex_idx: u64,
-	locked_before: StdMutex<HashSet<Arc<MutexMetadata>>>,
-	#[cfg(feature = "backtrace")]
-	mutex_construction_bt: Backtrace,
+struct LockMetadata {
+	lock_idx: u64,
+	locked_before: StdMutex<HashMap<u64, LockDep>>,
+	_lock_construction_bt: Backtrace,
 }
-impl PartialEq for MutexMetadata {
-	fn eq(&self, o: &MutexMetadata) -> bool { self.mutex_idx == o.mutex_idx }
+
+struct LockDep {
+	lock: Arc<LockMetadata>,
+	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
+	_lockdep_trace: Backtrace,
+}
+
+#[cfg(feature = "backtrace")]
+fn get_construction_location(backtrace: &Backtrace) -> String {
+	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
+	// that as the mutex construction site. Note that the first few frames may be in
+	// the `backtrace` crate, so we have to ignore those.
+	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+	let mut found_debug_sync = false;
+	for frame in backtrace.frames() {
+		for symbol in frame.symbols() {
+			let symbol_name = symbol.name().unwrap().as_str().unwrap();
+			if !sync_mutex_constr_regex.is_match(symbol_name) {
+				if found_debug_sync {
+					if let Some(col) = symbol.colno() {
+						return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
+					} else {
+						// Windows debug symbols don't support column numbers, so fall back to
+						// line numbers only if no `colno` is available
+						return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
+					}
+				}
+			} else { found_debug_sync = true; }
+		}
+	}
+	panic!("Couldn't find mutex construction callsite");
 }
-impl Eq for MutexMetadata {}
-impl std::hash::Hash for MutexMetadata {
-	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.mutex_idx); }
+
+impl LockMetadata {
+	fn new() -> Arc<LockMetadata> {
+		let backtrace = Backtrace::new();
+		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;
+
+		let res = Arc::new(LockMetadata {
+			locked_before: StdMutex::new(HashMap::new()),
+			lock_idx,
+			_lock_construction_bt: backtrace,
+		});
+
+		#[cfg(feature = "backtrace")]
+		{
+			let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
+			match locks.entry(lock_constr_location) {
+				hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
+			}
+		}
+		res
+	}
+
+	// Returns whether we were a recursive lock (only relevant for read)
+	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
+		let mut inserted = false;
+		LOCKS_HELD.with(|held| {
+			// For each lock which is currently locked, check that no lock's locked-before
+			// set includes the lock we're about to lock, which would imply a lockorder
+			// inversion.
+			for (locked_idx, _locked) in held.borrow().iter() {
+				if read && *locked_idx == this.lock_idx {
+					// Recursive read locks are explicitly allowed
+					return;
+				}
+			}
+			for (locked_idx, locked) in held.borrow().iter() {
+				if !read && *locked_idx == this.lock_idx {
+					// With `feature = "backtrace"` set, we may be looking at different instances
+					// of the same lock.
+					debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
+				}
+				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
+					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
+						#[cfg(feature = "backtrace")]
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
+							get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
+							get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
+							_locked_dep._lockdep_trace);
+						#[cfg(not(feature = "backtrace"))]
+						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+					}
+				}
+				// Insert any already-held locks in our locked-before set.
+				let mut locked_before = this.locked_before.lock().unwrap();
+				if !locked_before.contains_key(&locked.lock_idx) {
+					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
+					locked_before.insert(lockdep.lock.lock_idx, lockdep);
+				}
+			}
+			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
+			inserted = true;
+		});
+		inserted
+	}
+
+	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
+	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+
+	fn try_locked(this: &Arc<LockMetadata>) {
+		LOCKS_HELD.with(|held| {
+			// Since a try-lock will simply fail if the lock is held already, we do not
+			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
+			// succeeds, we do consider it to have created lockorder dependencies.
+			let mut locked_before = this.locked_before.lock().unwrap();
+			for (locked_idx, locked) in held.borrow().iter() {
+				if !locked_before.contains_key(locked_idx) {
+					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
+					locked_before.insert(*locked_idx, lockdep);
+				}
+			}
+			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
+		});
+	}
 }
 
 pub struct Mutex<T: Sized> {
 	inner: StdMutex<T>,
-	deps: Arc<MutexMetadata>,
+	deps: Arc<LockMetadata>,
 }
 
 #[must_use = "if unused the Mutex will immediately unlock"]
@@ -88,8 +209,8 @@ impl<'a, T: Sized> MutexGuard<'a, T> {
 
 impl<T: Sized> Drop for MutexGuard<'_, T> {
 	fn drop(&mut self) {
-		MUTEXES_HELD.with(|held| {
-			held.borrow_mut().remove(&self.mutex.deps);
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
 		});
 	}
 }
@@ -110,104 +231,202 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 impl<T> Mutex<T> {
 	pub fn new(inner: T) -> Mutex<T> {
-		Mutex {
-			inner: StdMutex::new(inner),
-			deps: Arc::new(MutexMetadata {
-				locked_before: StdMutex::new(HashSet::new()),
-				mutex_idx: MUTEX_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-				#[cfg(feature = "backtrace")]
-				mutex_construction_bt: Backtrace::new(),
-			}),
-		}
+		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
-		MUTEXES_HELD.with(|held| {
-			// For each mutex which is currently locked, check that no mutex's locked-before
-			// set includes the mutex we're about to lock, which would imply a lockorder
-			// inversion.
-			for locked in held.borrow().iter() {
-				for locked_dep in locked.locked_before.lock().unwrap().iter() {
-					if *locked_dep == self.deps {
-						#[cfg(feature = "backtrace")]
-						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.mutex_construction_bt);
-						#[cfg(not(feature = "backtrace"))]
-						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
-					}
-				}
-				// Insert any already-held mutexes in our locked-before set.
-				self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-			}
-			held.borrow_mut().insert(Arc::clone(&self.deps));
-		});
+		LockMetadata::pre_lock(&self.deps);
 		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
 	}
 
 	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
 		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
 		if res.is_ok() {
-			MUTEXES_HELD.with(|held| {
-				// Since a try-lock will simply fail if the lock is held already, we do not
-				// consider try-locks to ever generate lockorder inversions. However, if a try-lock
-				// succeeds, we do consider it to have created lockorder dependencies.
-				for locked in held.borrow().iter() {
-					self.deps.locked_before.lock().unwrap().insert(Arc::clone(locked));
-				}
-				held.borrow_mut().insert(Arc::clone(&self.deps));
-			});
+			LockMetadata::try_locked(&self.deps);
 		}
 		res
 	}
 }
 
-pub struct RwLock<T: ?Sized> {
-	inner: StdRwLock<T>
+pub struct RwLock<T: Sized> {
+	inner: StdRwLock<T>,
+	deps: Arc<LockMetadata>,
 }
 
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockReadGuard<'a, T>,
+pub struct RwLockReadGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
+	first_lock: bool,
+	guard: StdRwLockReadGuard<'a, T>,
 }
 
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
-	lock: StdRwLockWriteGuard<'a, T>,
+pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
+	lock: &'a RwLock<T>,
+	guard: StdRwLockWriteGuard<'a, T>,
 }
 
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
+	}
+}
+
+impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
+	fn drop(&mut self) {
+		if !self.first_lock {
+			// Note that its not strictly true that the first taken read lock will get unlocked
+			// last, but in practice our locks are always taken as RAII, so it should basically
+			// always be true.
+			return;
+		}
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps.lock_idx);
+		});
 	}
 }
 
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
 	type Target = T;
 
 	fn deref(&self) -> &T {
-		&self.lock.deref()
+		&self.guard.deref()
+	}
+}
+
+impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
+	fn drop(&mut self) {
+		LOCKS_HELD.with(|held| {
+			held.borrow_mut().remove(&self.lock.deps.lock_idx);
+		});
 	}
 }
 
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 	fn deref_mut(&mut self) -> &mut T {
-		self.lock.deref_mut()
+		self.guard.deref_mut()
 	}
 }
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner) }
+		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
-		self.inner.read().map(|lock| RwLockReadGuard { lock }).map_err(|_| ())
+		let first_lock = LockMetadata::pre_read_lock(&self.deps);
+		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
 	}
 
 	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		LockMetadata::pre_lock(&self.deps);
+		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
 	}
 
 	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
-		self.inner.try_write().map(|lock| RwLockWriteGuard { lock }).map_err(|_| ())
+		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
+		if res.is_ok() {
+			LockMetadata::try_locked(&self.deps);
+		}
+		res
 	}
 }
+
+pub type FairRwLock<T> = RwLock<T>;
+
+mod tests {
+	use super::{RwLock, Mutex};
+
+	#[test]
+	#[should_panic]
+	#[cfg(not(feature = "backtrace"))]
+	fn recursive_lock_fail() {
+		let mutex = Mutex::new(());
+		let _a = mutex.lock().unwrap();
+		let _b = mutex.lock().unwrap();
+	}
+
+	#[test]
+	fn recursive_read() {
+		let lock = RwLock::new(());
+		let _a = lock.read().unwrap();
+		let _b = lock.read().unwrap();
+	}
+
+	#[test]
+	#[should_panic]
+	fn lockorder_fail() {
+		let a = Mutex::new(());
+		let b = Mutex::new(());
+		{
+			let _a = a.lock().unwrap();
+			let _b = b.lock().unwrap();
+		}
+		{
+			let _b = b.lock().unwrap();
+			let _a = a.lock().unwrap();
+		}
+	}
+
+	#[test]
+	#[should_panic]
+	fn write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.write().unwrap();
+		}
+		{
+			let _b = b.write().unwrap();
+			let _a = a.write().unwrap();
+		}
+	}
+
+	#[test]
+	#[should_panic]
+	fn read_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
+	}
+
+	#[test]
+	fn read_recursive_no_lockorder() {
+		// Like the above, but note that no lockorder is implied when we recursively read-lock a
+		// RwLock, causing this to pass just fine.
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		let _outer = a.read().unwrap();
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
+	}
+
+	#[test]
+	#[should_panic]
+	fn read_write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.write().unwrap();
+		}
+	}
+}
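
Because the wrappers in this diff keep the `std::sync` method signatures (plus the re-exported `Arc`), a crate can compile against them in test builds only and keep the untracked `std::sync` types everywhere else. The snippet below is a minimal sketch of such a cfg-gated shim, not part of this diff; the module declaration and cfg gates are illustrative assumptions about how a consumer might wire it up:

// Hypothetical crate-root shim: use the lockorder-checking types from debug_sync.rs
// (the file shown above) when running tests, and the plain std types otherwise.
#[cfg(test)]
mod debug_sync;

#[cfg(test)]
pub use self::debug_sync::{Arc, Mutex, RwLock};
#[cfg(not(test))]
pub use std::sync::{Arc, Mutex, RwLock};

// Call sites stay ordinary RAII locking. In test builds, taking `a` before `b` on one
// code path and `b` before `a` on another panics deterministically (as in the
// `lockorder_fail` test above) instead of deadlocking intermittently in production:
//
//     let a = Mutex::new(0u8);
//     let b = Mutex::new(0u8);
//     let _x = a.lock().unwrap();
//     let _y = b.lock().unwrap(); // records the dependency "b is locked after a"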