use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;
+pub use std::sync::WaitTimeoutResult;
+
use crate::prelude::HashMap;
+use super::{LockTestExt, LockHeldState};
+
#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
Condvar { inner: StdCondvar::new() }
}
- pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
+ pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, condition: F)
+ -> LockResult<MutexGuard<'a, T>> {
let mutex: &'a Mutex<T> = guard.mutex;
- self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
+ self.inner.wait_while(guard.into_inner(), condition).map(|lock| MutexGuard { mutex, lock })
+ .map_err(|_| ())
}
#[allow(unused)]
- pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
+ pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F)
+ -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
let mutex = guard.mutex;
- self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
+ self.inner.wait_timeout_while(guard.into_inner(), dur, condition).map_err(|_| ())
+ .map(|(lock, e)| (MutexGuard { mutex, lock }, e))
}
pub fn notify_all(&self) { self.inner.notify_all(); }
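// A minimal usage sketch for the `wait_while` wrapper above (the names here are
// illustrative, not part of this change). As with `std::sync::Condvar::wait_while`,
// the calling thread parks until the closure returns `false`, re-checking the
// predicate on every wakeup so spurious wakeups are absorbed:
//
//     let pair = Arc::new((Mutex::new(false), Condvar::new()));
//     let (mtx, cv) = &*pair;
//     // Waiting thread: blocks until another thread sets the flag to `true`
//     // and calls `cv.notify_all()`.
//     let _guard = cv.wait_while(mtx.lock().unwrap(), |done| !*done).unwrap();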
_lockdep_trace: Backtrace,
}
+// Locates the frame preceding the earliest `debug_sync` frame in the call stack. This ensures we
+// can properly detect a lock's construction and acquisition callsites, since the latter may
+// contain multiple `debug_sync` frames.
#[cfg(feature = "backtrace")]
-fn get_construction_location(backtrace: &Backtrace) -> String {
- // Find the first frame that is after `debug_sync` (or that is in our tests) and use
- // that as the mutex construction site. Note that the first few frames may be in
- // the `backtrace` crate, so we have to ignore those.
- let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+fn locate_call_symbol(backtrace: &Backtrace) -> (String, Option<u32>) {
+ // Find the earliest `debug_sync` frame in the call stack (or, in our tests, the earliest test
+ // frame, which also matches the regex) and use the frame preceding it as the callsite. Note
+ // that the first few frames may be in the `backtrace` crate, so we have to ignore those.
+ let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
let mut found_debug_sync = false;
- for frame in backtrace.frames() {
- for symbol in frame.symbols() {
- let symbol_name = symbol.name().unwrap().as_str().unwrap();
- if !sync_mutex_constr_regex.is_match(symbol_name) {
- if found_debug_sync {
- if let Some(col) = symbol.colno() {
- return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
- } else {
- // Windows debug symbols don't support column numbers, so fall back to
- // line numbers only if no `colno` is available
- return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
+ let mut symbol_after_latest_debug_sync = None;
+ for frame in backtrace.frames().iter() {
+ for symbol in frame.symbols().iter() {
+ if let Some(symbol_name) = symbol.name().map(|name| name.as_str()).flatten() {
+ if !sync_mutex_constr_regex.is_match(symbol_name) {
+ if found_debug_sync {
+ symbol_after_latest_debug_sync = Some(symbol);
+ found_debug_sync = false;
}
- }
- } else { found_debug_sync = true; }
+ } else { found_debug_sync = true; }
+ }
}
}
- panic!("Couldn't find mutex construction callsite");
+ let symbol = symbol_after_latest_debug_sync.expect("Couldn't find lock call symbol");
+ (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}
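// The returned tuple is ("<file>:<line>", column), with the column `None` on
// platforms (notably Windows) whose debug symbols omit column information;
// callers that need full callsite equality therefore compare the two parts
// separately rather than folding the column into the string.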
impl LockMetadata {
#[cfg(feature = "backtrace")]
{
- let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+ let (lock_constr_location, lock_constr_colno) =
+ locate_call_symbol(&res._lock_construction_bt);
LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
match locks.entry(lock_constr_location) {
- hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+ hash_map::Entry::Occupied(e) => {
+ assert_eq!(lock_constr_colno,
+ locate_call_symbol(&e.get()._lock_construction_bt).1,
+ "Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
+ return Arc::clone(e.get())
+ },
hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
}
}
res
}
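// To illustrate the column-number assertion above with a hypothetical sketch:
// constructing two distinct mutexes on a single source line gives them an
// identical file:line construction callsite, so on Windows (no column info)
// both would dedup to one `LockMetadata` entry and lockorder tracking could
// report false positives:
//
//     let (a, b) = (Mutex::new(0u8), Mutex::new(0u8)); // same line: rejected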
- // Returns whether we were a recursive lock (only relevant for read)
- fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
- let mut inserted = false;
+ fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
LOCKS_HELD.with(|held| {
- // For each lock which is currently locked, check that no lock's locked-before
- // set includes the lock we're about to lock, which would imply a lockorder
- // inversion.
+ // For each lock that is currently held, check that no lock's `locked_before` set
+ // includes the lock we're about to hold, which would imply a lockorder inversion.
for (locked_idx, _locked) in held.borrow().iter() {
- if read && *locked_idx == this.lock_idx {
- // Recursive read locks are explicitly allowed
- return;
+ if *locked_idx == this.lock_idx {
+ // Note that with `feature = "backtrace"` set, we may be looking at different
+ // instances of the same lock. Still, doing so is quite risky: a total lock
+ // order must be maintained, and maintaining one across a set of
+ // otherwise-identical mutexes is fraught with issues.
+ #[cfg(feature = "backtrace")]
+ debug_assert!(_double_lock_self_allowed,
+ "Tried to acquire a lock while it was held!\nLock constructed at {}",
+ locate_call_symbol(&this._lock_construction_bt).0);
+ #[cfg(not(feature = "backtrace"))]
+ panic!("Tried to acquire a lock while it was held!");
}
}
- for (locked_idx, locked) in held.borrow().iter() {
- if !read && *locked_idx == this.lock_idx {
- // With `feature = "backtrace"` set, we may be looking at different instances
- // of the same lock.
- debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
- }
+ for (_locked_idx, locked) in held.borrow().iter() {
for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
- if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
- #[cfg(feature = "backtrace")]
- panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
- get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
- get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
- _locked_dep._lockdep_trace);
- #[cfg(not(feature = "backtrace"))]
- panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+ let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
+ let has_same_construction = *locked_dep_idx == locked.lock_idx;
+ if is_dep_this_lock && !has_same_construction {
+ #[allow(unused_mut, unused_assignments)]
+ let mut has_same_callsite = false;
+ #[cfg(feature = "backtrace")] {
+ has_same_callsite = _double_lock_self_allowed &&
+ locate_call_symbol(&_locked_dep._lockdep_trace) ==
+ locate_call_symbol(&Backtrace::new());
+ }
+ if !has_same_callsite {
+ #[cfg(feature = "backtrace")]
+ panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
+ locate_call_symbol(&this._lock_construction_bt).0,
+ this.lock_idx, this._lock_construction_bt,
+ locate_call_symbol(&locked._lock_construction_bt).0,
+ locked.lock_idx, locked._lock_construction_bt,
+ _locked_dep._lockdep_trace);
+ #[cfg(not(feature = "backtrace"))]
+ panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
+ }
}
}
// Insert any already-held locks in our locked-before set.
}
}
held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
- inserted = true;
});
- inserted
}
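// To illustrate what `pre_lock` catches, a sketch mirroring the `lockorder_fail`
// test removed below: taking two locks in both orders from one thread first
// records the dependency a -> b, then trips on the inverted order b -> a.
//
//     let a = Mutex::new(());
//     let b = Mutex::new(());
//     { let _a = a.lock().unwrap(); let _b = b.lock().unwrap(); } // records a -> b
//     { let _b = b.lock().unwrap(); let _a = a.lock().unwrap(); } // panics: inversion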
- fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
- fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
+ fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+ let mut res = LockHeldState::NotHeldByThread;
+ LOCKS_HELD.with(|held| {
+ for (locked_idx, _locked) in held.borrow().iter() {
+ if *locked_idx == this.lock_idx {
+ res = LockHeldState::HeldByThread;
+ }
+ }
+ });
+ res
+ }
fn try_locked(this: &Arc<LockMetadata>) {
LOCKS_HELD.with(|held| {
inner: StdMutex<T>,
deps: Arc<LockMetadata>,
}
+impl<T: Sized> Mutex<T> {
+ pub(crate) fn into_inner(self) -> LockResult<T> {
+ self.inner.into_inner().map_err(|_| ())
+ }
+}
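// A short sketch of the accessor above, which mirrors `std::sync::Mutex::into_inner`
// but maps the poison error into this module's `()` error type:
//
//     let mtx = Mutex::new(vec![1u8, 2, 3]);
//     let contents = mtx.into_inner().unwrap(); // consumes the lock wrapper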
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
}
pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
- LockMetadata::pre_lock(&self.deps);
+ LockMetadata::pre_lock(&self.deps, false);
self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
}
}
}
+impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ LockMetadata::held_by_thread(&self.deps)
+ }
+ type ExclLock = MutexGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> {
+ LockMetadata::pre_lock(&self.deps, true);
+ self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
+ }
+}
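// A hypothetical sketch of the narrow intended use of the `LockTestExt` impl
// above: code that must take two instances of the same logical lock (i.e. with
// the same construction callsite) and can itself guarantee a consistent total
// order between them:
//
//     fn lock_both<'a, T>(a: &'a Mutex<T>, b: &'a Mutex<T>)
//         -> (MutexGuard<'a, T>, MutexGuard<'a, T>) {
//         // Caller-enforced invariant: `a` is always locked before `b`.
//         (a.unsafe_well_ordered_double_lock_self(),
//          b.unsafe_well_ordered_double_lock_self())
//     }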
+
pub struct RwLock<T: Sized> {
inner: StdRwLock<T>,
deps: Arc<LockMetadata>,
pub struct RwLockReadGuard<'a, T: Sized + 'a> {
lock: &'a RwLock<T>,
- first_lock: bool,
guard: StdRwLockReadGuard<'a, T>,
}
impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
fn drop(&mut self) {
- if !self.first_lock {
- // Note that its not strictly true that the first taken read lock will get unlocked
- // last, but in practice our locks are always taken as RAII, so it should basically
- // always be true.
- return;
- }
LOCKS_HELD.with(|held| {
held.borrow_mut().remove(&self.lock.deps.lock_idx);
});
}
pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
- let first_lock = LockMetadata::pre_read_lock(&self.deps);
- self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
+ // Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
+ // deadlock trying to take a second read lock if another thread is waiting on the write
+ // lock. This behavior is platform dependent, and our in-tree `FairRwLock` always blocks
+ // new readers behind a pending writer, guaranteeing exactly such a deadlock.
+ LockMetadata::pre_lock(&self.deps, false);
+ self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
}
pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
- LockMetadata::pre_lock(&self.deps);
+ LockMetadata::pre_lock(&self.deps, false);
self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
}
}
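// A hypothetical interleaving that makes the deadlock noted in `read` concrete,
// given some shared `lock: RwLock<T>`: thread A already holds a read lock when
// thread B requests the write lock, and a fair rwlock then queues all new
// readers behind B:
//
//     let _r1 = lock.read().unwrap();  // thread A
//     // thread B: lock.write() blocks, and also blocks any new readers
//     let _r2 = lock.read().unwrap();  // thread A again -- deadlock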
}
-pub type FairRwLock<T> = RwLock<T>;
-
-mod tests {
- use super::{RwLock, Mutex};
-
- #[test]
- #[should_panic]
- #[cfg(not(feature = "backtrace"))]
- fn recursive_lock_fail() {
- let mutex = Mutex::new(());
- let _a = mutex.lock().unwrap();
- let _b = mutex.lock().unwrap();
- }
-
- #[test]
- fn recursive_read() {
- let lock = RwLock::new(());
- let _a = lock.read().unwrap();
- let _b = lock.read().unwrap();
- }
-
- #[test]
- #[should_panic]
- fn lockorder_fail() {
- let a = Mutex::new(());
- let b = Mutex::new(());
- {
- let _a = a.lock().unwrap();
- let _b = b.lock().unwrap();
- }
- {
- let _b = b.lock().unwrap();
- let _a = a.lock().unwrap();
- }
- }
-
- #[test]
- #[should_panic]
- fn write_lockorder_fail() {
- let a = RwLock::new(());
- let b = RwLock::new(());
- {
- let _a = a.write().unwrap();
- let _b = b.write().unwrap();
- }
- {
- let _b = b.write().unwrap();
- let _a = a.write().unwrap();
- }
- }
-
- #[test]
- #[should_panic]
- fn read_lockorder_fail() {
- let a = RwLock::new(());
- let b = RwLock::new(());
- {
- let _a = a.read().unwrap();
- let _b = b.read().unwrap();
- }
- {
- let _b = b.read().unwrap();
- let _a = a.read().unwrap();
- }
- }
-
- #[test]
- fn read_recursive_no_lockorder() {
- // Like the above, but note that no lockorder is implied when we recursively read-lock a
- // RwLock, causing this to pass just fine.
- let a = RwLock::new(());
- let b = RwLock::new(());
- let _outer = a.read().unwrap();
- {
- let _a = a.read().unwrap();
- let _b = b.read().unwrap();
- }
- {
- let _b = b.read().unwrap();
- let _a = a.read().unwrap();
- }
+impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
+ #[inline]
+ fn held_by_thread(&self) -> LockHeldState {
+ LockMetadata::held_by_thread(&self.deps)
}
-
- #[test]
- #[should_panic]
- fn read_write_lockorder_fail() {
- let a = RwLock::new(());
- let b = RwLock::new(());
- {
- let _a = a.write().unwrap();
- let _b = b.read().unwrap();
- }
- {
- let _b = b.read().unwrap();
- let _a = a.write().unwrap();
- }
+ type ExclLock = RwLockWriteGuard<'a, T>;
+ #[inline]
+ fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
+ LockMetadata::pre_lock(&self.deps, true);
+ self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
}
}
+
+pub type FairRwLock<T> = RwLock<T>;
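// In this debug build, `FairRwLock` simply aliases the instrumented `RwLock`
// above, so it inherits the same lockorder checking; the production fairness
// behavior is not modeled here.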