pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

use crate::prelude::HashMap;

use super::{LockTestExt, LockHeldState};
#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};

// Without the `backtrace` feature we still need a `Backtrace` type so the
// structs below compile; it is a zero-sized stand-in.
#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;
pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
	}

	pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
		let mutex = guard.mutex;
		self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}
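// Illustrative sketch (not part of the upstream file): how this `Condvar`
// wrapper is typically used with the checked `Mutex` defined below. `wait`
// consumes our `MutexGuard`, waits on the inner `StdCondvar`, and re-wraps
// the returned guard so the lockorder bookkeeping stays consistent.
#[cfg(test)]
mod condvar_usage_example {
	use super::{Arc, Condvar, Mutex};

	#[test]
	fn wait_for_flag() {
		let pair = Arc::new((Mutex::new(false), Condvar::new()));
		let pair2 = Arc::clone(&pair);
		std::thread::spawn(move || {
			let (mutex, condvar) = &*pair2;
			*mutex.lock().unwrap() = true;
			condvar.notify_all();
		});
		let (mutex, condvar) = &*pair;
		let mut guard = mutex.lock().unwrap();
		// Loop to tolerate spurious wakeups; each `wait` re-acquires the lock
		// (and re-registers it in the thread-local held-locks set).
		while !*guard {
			guard = condvar.wait(guard).unwrap();
		}
	}
}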
thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`
	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();
/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
/// when the Mutex itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashMap<u64, LockDep>>,
	_lock_construction_bt: Backtrace,
}

struct LockDep {
	lock: Arc<LockMetadata>,
	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
	_lockdep_trace: Backtrace,
}
#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
	// that as the mutex construction site. Note that the first few frames may be in
	// the `backtrace` crate, so we have to ignore those.
	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
	let mut found_debug_sync = false;
	for frame in backtrace.frames() {
		for symbol in frame.symbols() {
			let symbol_name = symbol.name().unwrap().as_str().unwrap();
			if !sync_mutex_constr_regex.is_match(symbol_name) {
				if found_debug_sync {
					return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
				}
			} else { found_debug_sync = true; }
		}
	}
	panic!("Couldn't find mutex construction callsite");
}
impl LockMetadata {
	fn new() -> Arc<LockMetadata> {
		let backtrace = Backtrace::new();
		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

		let res = Arc::new(LockMetadata {
			locked_before: StdMutex::new(HashMap::new()),
			lock_idx,
			_lock_construction_bt: backtrace,
		});

		// With `backtrace` enabled, de-duplicate locks by construction site so that
		// multiple instances constructed at the same callsite share one metadata entry.
		#[cfg(feature = "backtrace")]
		{
			let (lock_constr_location, lock_constr_colno) =
				get_construction_location(&res._lock_construction_bt);
			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
			match locks.entry(lock_constr_location) {
				hash_map::Entry::Occupied(e) => {
					assert_eq!(lock_constr_colno,
						get_construction_location(&e.get()._lock_construction_bt).1,
						"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
					return Arc::clone(e.get())
				},
				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
			}
		}
		res
	}
	fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
		LOCKS_HELD.with(|held| {
			// For each lock which is currently locked, check that no lock's locked-before
			// set includes the lock we're about to lock, which would imply a lockorder
			// inversion (see the test sketch after this impl block).
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					// Note that with `feature = "backtrace"` set, we may be looking at different
					// instances of the same lock. Still, doing so is quite risky, a total order
					// must be maintained, and doing so across a set of otherwise-identical mutexes
					// is fraught with issues.
					#[cfg(feature = "backtrace")]
					debug_assert!(_double_lock_self_allowed,
						"Tried to acquire a lock while it was held!\nLock constructed at {}",
						get_construction_location(&this._lock_construction_bt).0);
					#[cfg(not(feature = "backtrace"))]
					panic!("Tried to acquire a lock while it was held!");
				}
			}
			for (_locked_idx, locked) in held.borrow().iter() {
				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
						#[cfg(feature = "backtrace")]
						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
							get_construction_location(&this._lock_construction_bt).0,
							this.lock_idx, this._lock_construction_bt,
							get_construction_location(&locked._lock_construction_bt).0,
							locked.lock_idx, locked._lock_construction_bt,
							_locked_dep._lockdep_trace);
						#[cfg(not(feature = "backtrace"))]
						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
					}
				}
				// Insert any already-held locks in our locked-before set.
				let mut locked_before = this.locked_before.lock().unwrap();
				if !locked_before.contains_key(&locked.lock_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(lockdep.lock.lock_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
		let mut res = LockHeldState::NotHeldByThread;
		LOCKS_HELD.with(|held| {
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					res = LockHeldState::HeldByThread;
				}
			}
		});
		res
	}
	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
			// succeeds, we do consider it to have created lockorder dependencies.
			let mut locked_before = this.locked_before.lock().unwrap();
			for (locked_idx, locked) in held.borrow().iter() {
				if !locked_before.contains_key(locked_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(*locked_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
}
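// Illustrative sketch (not part of the upstream file) of the inversion
// `pre_lock` detects: taking A then B records the dependency "B is locked
// after A", so a later B-then-A acquisition panics. Modeled on the crate's
// own lockorder tests.
#[cfg(test)]
mod lockorder_inversion_example {
	use super::Mutex;

	#[test]
	#[should_panic]
	fn ab_then_ba_panics() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap(); // records "b locked after a" in b's locked-before set
		}
		{
			let _b = b.lock().unwrap();
			let _a = a.lock().unwrap(); // inversion: `pre_lock` panics here
		}
	}
}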
pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}

impl<T: Sized> Mutex<T> {
	pub(crate) fn into_inner(self) -> LockResult<T> {
		self.inner.into_inner().map_err(|_| ())
	}
}
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// We cannot simply move `self.lock` out of `self` because `MutexGuard`
		// implements `Drop` (E0509), so we copy the field out and forget `self`
		// instead, skipping our `Drop` bookkeeping for the consumed guard.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}
impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}
impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
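// Illustrative sketch (not part of the upstream file): a failed try-lock is
// not a lockorder event, so try-locking a mutex this thread already holds
// simply returns `Err(())` rather than panicking like a blocking `lock` would.
#[cfg(test)]
mod try_lock_example {
	use super::Mutex;

	#[test]
	fn try_lock_fails_cleanly_when_already_held() {
		let mutex = Mutex::new(0u32);
		let _guard = mutex.lock().unwrap();
		// The underlying std `try_lock` returns `WouldBlock`, which we map to
		// `Err(())`; `try_locked` only runs when the acquisition succeeds.
		assert!(mutex.try_lock().is_err());
	}
}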
impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = MutexGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
	}
}
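// Illustrative sketch (not part of the upstream file): `LockTestExt::held_by_thread`
// reports whether the current thread has this lock in its thread-local
// `LOCKS_HELD` set. `matches!` is used so the sketch does not assume any
// particular trait derives on `LockHeldState`.
#[cfg(test)]
mod held_by_thread_example {
	use super::{LockHeldState, LockTestExt, Mutex};

	#[test]
	fn reports_holding_state() {
		let mutex = Mutex::new(());
		assert!(matches!(mutex.held_by_thread(), LockHeldState::NotHeldByThread));
		let _guard = mutex.lock().unwrap();
		assert!(matches!(mutex.held_by_thread(), LockHeldState::HeldByThread));
	}
}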
pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}
impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}
impl<T: Sized> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		// Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
		// deadlock trying to take a second read lock if another thread is waiting on the write
		// lock. This behavior is platform dependent, but our in-tree `FairRwLock` guarantees
		// such a deadlock, so we treat a recursive read lock like any other double-lock.
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
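// Illustrative sketch (not part of the upstream file): because `read` runs the
// same `pre_lock` bookkeeping as an exclusive lock, a recursive read on one
// thread is flagged as a double-lock. With `feature = "backtrace"` the check
// is a `debug_assert!`, so this sketch assumes debug assertions are enabled
// (as they are in a test build).
#[cfg(test)]
mod recursive_read_example {
	use super::RwLock;

	#[test]
	#[should_panic]
	fn recursive_read_is_rejected() {
		let rwlock = RwLock::new(());
		let _r1 = rwlock.read().unwrap();
		// Second read on the same thread: `pre_lock` sees the lock already in
		// `LOCKS_HELD` and panics rather than risking a real deadlock.
		let _r2 = rwlock.read().unwrap();
	}
}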
impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = RwLockWriteGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
	}
}
pub type FairRwLock<T> = RwLock<T>;
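// Illustrative sketch (not part of the upstream file): under this debug shim
// `FairRwLock` is just an alias for the checked `RwLock` with no separate
// fairness logic, and lockorder tracking spans lock types, since `Mutex` and
// `RwLock` record dependencies in the same `LockMetadata` graph.
#[cfg(test)]
mod mixed_lockorder_example {
	use super::{FairRwLock, Mutex};

	#[test]
	#[should_panic]
	fn mutex_rwlock_inversion_panics() {
		let mutex = Mutex::new(());
		let rwlock = FairRwLock::new(());
		{
			let _m = mutex.lock().unwrap();
			let _w = rwlock.write().unwrap(); // records "rwlock locked after mutex"
		}
		{
			let _w = rwlock.write().unwrap();
			let _m = mutex.lock().unwrap(); // inversion across lock types: panics
		}
	}
}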