pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

pub use std::sync::WaitTimeoutResult;

use crate::prelude::*;

use super::{LockTestExt, LockHeldState};
#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;
pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, condition: F)
	-> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait_while(guard.into_inner(), condition).map(|lock| MutexGuard { mutex, lock })
			.map_err(|_| ())
	}

	pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F)
	-> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
		let mutex = guard.mutex;
		self.inner.wait_timeout_while(guard.into_inner(), dur, condition).map_err(|_| ())
			.map(|(lock, e)| (MutexGuard { mutex, lock }, e))
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}
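
// Illustrative usage sketch (not upstream test coverage): shows how the `Condvar`
// wrapper above round-trips the lockorder-checking `MutexGuard` through
// `wait_timeout_while`. The names `flag`, `cv` and the 10-second timeout are
// choices local to this example.
#[cfg(test)]
mod condvar_usage_sketch {
	use super::*;

	#[test]
	fn wait_while_wakes_once_condition_clears() {
		let flag = Arc::new(Mutex::new(false));
		let cv = Arc::new(Condvar::new());

		let thread_flag = Arc::clone(&flag);
		let thread_cv = Arc::clone(&cv);
		let handle = std::thread::spawn(move || {
			// Set the flag under the lock, then wake the waiter.
			*thread_flag.lock().unwrap() = true;
			thread_cv.notify_all();
		});

		// Wait (with a timeout so the test cannot hang) while the flag is unset.
		let guard = flag.lock().unwrap();
		let (guard, timeout) =
			cv.wait_timeout_while(guard, Duration::from_secs(10), |set| !*set).unwrap();
		assert!(!timeout.timed_out());
		assert!(*guard);
		drop(guard);
		handle.join().unwrap();
	}
}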
thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`.
	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(new_hash_map());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock: its id, the set of things locked-before it, and the backtrace of
/// when the lock itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashMap<u64, LockDep>>,
	_lock_construction_bt: Backtrace,
}

struct LockDep {
	lock: Arc<LockMetadata>,
	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
	_lockdep_trace: Backtrace,
}
// Locates the frame preceding the earliest `debug_sync` frame in the call stack. This ensures we
// can properly detect a lock's construction and acquisition callsites, since the latter may
// contain multiple `debug_sync` frames.
#[cfg(feature = "backtrace")]
fn locate_call_symbol(backtrace: &Backtrace) -> (String, Option<u32>) {
	// Find the earliest `debug_sync` frame (or that is in our tests) and use the frame preceding it
	// as the callsite. Note that the first few frames may be in the `backtrace` crate, so we have
	// to ignore those.
	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
	let mut found_debug_sync = false;
	let mut symbol_after_latest_debug_sync = None;
	for frame in backtrace.frames().iter() {
		for symbol in frame.symbols().iter() {
			if let Some(symbol_name) = symbol.name().map(|name| name.as_str()).flatten() {
				if !sync_mutex_constr_regex.is_match(symbol_name) {
					if found_debug_sync {
						symbol_after_latest_debug_sync = Some(symbol);
						found_debug_sync = false;
					}
				} else { found_debug_sync = true; }
			}
		}
	}
	let symbol = symbol_after_latest_debug_sync.unwrap_or_else(|| {
		panic!("Couldn't find lock call symbol in trace {:?}", backtrace);
	});
	(format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}
impl LockMetadata {
	fn new() -> Arc<LockMetadata> {
		let backtrace = Backtrace::new();
		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

		let res = Arc::new(LockMetadata {
			locked_before: StdMutex::new(new_hash_map()),
			lock_idx,
			_lock_construction_bt: backtrace,
		});

		#[cfg(feature = "backtrace")]
		{
			let (lock_constr_location, lock_constr_colno) =
				locate_call_symbol(&res._lock_construction_bt);
			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); } });
			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
			match locks.entry(lock_constr_location) {
				hash_map::Entry::Occupied(e) => {
					assert_eq!(lock_constr_colno,
						locate_call_symbol(&e.get()._lock_construction_bt).1,
						"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
					return Arc::clone(e.get())
				},
				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
			}
		}
		res
	}
	fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
		LOCKS_HELD.with(|held| {
			// For each lock that is currently held, check that no lock's `locked_before` set
			// includes the lock we're about to hold, which would imply a lockorder inversion.
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					// Note that with `feature = "backtrace"` set, we may be looking at different
					// instances of the same lock. Still, doing so is quite risky: a total order
					// must be maintained, and doing so across a set of otherwise-identical mutexes
					// is fraught with issues.
					#[cfg(feature = "backtrace")]
					debug_assert!(_double_lock_self_allowed,
						"Tried to acquire a lock while it was held!\nLock constructed at {}",
						locate_call_symbol(&this._lock_construction_bt).0);
					#[cfg(not(feature = "backtrace"))]
					panic!("Tried to acquire a lock while it was held!");
				}
			}
			for (_locked_idx, locked) in held.borrow().iter() {
				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
					let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
					let has_same_construction = *locked_dep_idx == locked.lock_idx;
					if is_dep_this_lock && !has_same_construction {
						#[allow(unused_mut, unused_assignments)]
						let mut has_same_callsite = false;
						#[cfg(feature = "backtrace")] {
							has_same_callsite = _double_lock_self_allowed &&
								locate_call_symbol(&_locked_dep._lockdep_trace) ==
									locate_call_symbol(&Backtrace::new());
						}
						if !has_same_callsite {
							#[cfg(feature = "backtrace")]
							panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
								locate_call_symbol(&this._lock_construction_bt).0,
								this.lock_idx, this._lock_construction_bt,
								locate_call_symbol(&locked._lock_construction_bt).0,
								locked.lock_idx, locked._lock_construction_bt,
								_locked_dep._lockdep_trace);
							#[cfg(not(feature = "backtrace"))]
							panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
						}
					}
				}
				// Insert any already-held locks in our locked-before set.
				let mut locked_before = this.locked_before.lock().unwrap();
				if !locked_before.contains_key(&locked.lock_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(lockdep.lock.lock_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
		let mut res = LockHeldState::NotHeldByThread;
		LOCKS_HELD.with(|held| {
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					res = LockHeldState::HeldByThread;
				}
			}
		});
		res
	}

	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
			// succeeds, we do consider it to have created lockorder dependencies.
			let mut locked_before = this.locked_before.lock().unwrap();
			for (locked_idx, locked) in held.borrow().iter() {
				if !locked_before.contains_key(locked_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(*locked_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
}
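
// Illustrative sketch (not upstream test coverage) of what the metadata above
// buys us: lockorder-inversion detection and per-thread "is this held?" queries.
// With the `backtrace` feature, `locate_call_symbol` collapses construction sites
// inside this module, so the inversion test below is only built without it.
#[cfg(test)]
mod lockorder_sketches {
	use super::*;

	// Locking `a` then `b` records "b may be taken while a is held"; later taking
	// them in the opposite order trips the inversion check in `pre_lock`.
	#[test]
	#[should_panic]
	#[cfg(not(feature = "backtrace"))]
	fn inverted_lock_order_is_detected() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap();
		}
		let _b = b.lock().unwrap();
		let _a = a.lock().unwrap(); // panics in `LockMetadata::pre_lock`
	}

	// `held_by_thread` reflects exactly the guards currently alive on this thread.
	#[test]
	fn held_by_thread_tracks_guards() {
		let m = Mutex::new(0u8);
		assert!(matches!(m.held_by_thread(), LockHeldState::NotHeldByThread));
		let guard = m.lock().unwrap();
		assert!(matches!(m.held_by_thread(), LockHeldState::HeldByThread));
		drop(guard);
		assert!(matches!(m.held_by_thread(), LockHeldState::NotHeldByThread));
	}
}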
pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}
impl<T: Sized> Mutex<T> {
	pub(crate) fn into_inner(self) -> LockResult<T> {
		self.inner.into_inner().map_err(|_| ())
	}
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}
impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}
impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
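
// Illustrative sketch (not upstream test coverage): a failed `try_lock` is not a
// lockorder event at all -- it simply returns `Err(())` -- while a successful one
// records dependencies via `LockMetadata::try_locked` just as `lock` would.
#[cfg(test)]
mod try_lock_sketch {
	use super::*;

	#[test]
	fn failed_try_lock_is_not_a_violation() {
		let m = Mutex::new(5u32);
		let guard = m.lock().unwrap();
		// Re-acquiring on the same thread only yields an `Err`; no lockorder
		// panic is raised for try-locks.
		assert!(m.try_lock().is_err());
		assert_eq!(*guard, 5);
	}
}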
impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = MutexGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
	}
}
pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}
impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}
impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		// Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
		// deadlock trying to take a second read lock if another thread is waiting on the write
		// lock. This behavior is platform dependent, but our in-tree `FairRwLock` guarantees
		// such a deadlock, so a recursive read lock is treated as an error here.
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
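
// Illustrative sketch (not upstream test coverage) of the comment in `read`
// above: a second read lock on the same `RwLock` from one thread is reported by
// `pre_lock` instead of silently risking the platform-dependent deadlock of
// `std::sync::RwLock`. With the `backtrace` feature the report is a
// `debug_assert` rather than an unconditional panic, so this is only built
// without that feature.
#[cfg(all(test, not(feature = "backtrace")))]
mod rwlock_read_sketch {
	use super::*;

	#[test]
	#[should_panic]
	fn recursive_read_lock_is_reported() {
		let rw = RwLock::new(());
		let _first = rw.read().unwrap();
		// The checker panics here, before the second `read` can reach the
		// underlying `std::sync::RwLock`.
		let _second = rw.read().unwrap();
	}
}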
impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = RwLockWriteGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
	}
}

pub type FairRwLock<T> = RwLock<T>;