pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::collections::HashSet;
use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};

use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

#[cfg(feature = "backtrace")]
use backtrace::Backtrace;

pub type LockResult<Guard> = Result<Guard, ()>;
pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
	}

	pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
		let mutex = guard.mutex;
		self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}
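// Note on the wrappers above: `MutexGuard::into_inner` (defined below) forgets the outer
// guard rather than dropping it, so a Mutex being waited on stays in the thread-local
// held-lock set for the whole wait and is re-wrapped into a tracked guard on wakeup.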
thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`
	static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
/// Metadata about a single lock: its id, the set of things locked before it, and the backtrace
/// of where the lock itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
	#[cfg(feature = "backtrace")]
	lock_construction_bt: Backtrace,
}
impl PartialEq for LockMetadata {
	fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
}
impl Eq for LockMetadata {}
impl std::hash::Hash for LockMetadata {
	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
}
impl LockMetadata {
	fn new() -> LockMetadata {
		LockMetadata {
			locked_before: StdMutex::new(HashSet::new()),
			lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
			#[cfg(feature = "backtrace")]
			lock_construction_bt: Backtrace::new(),
		}
	}
	// Returns whether we were a recursive lock (only relevant for read)
	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
		let mut inserted = false;
		LOCKS_HELD.with(|held| {
			// For each lock which is currently locked, check that no lock's locked-before
			// set includes the lock we're about to lock, which would imply a lockorder
			// inversion.
			for locked in held.borrow().iter() {
				if read && *locked == *this {
					// Recursive read locks are explicitly allowed
					return;
				}
			}
			for locked in held.borrow().iter() {
				if !read && *locked == *this {
					panic!("Tried to lock a lock while it was held!");
				}
				for locked_dep in locked.locked_before.lock().unwrap().iter() {
					if *locked_dep == *this {
						#[cfg(feature = "backtrace")]
						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
						#[cfg(not(feature = "backtrace"))]
						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
					}
				}
				// Insert any already-held locks in our locked-before set.
				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
			}
			held.borrow_mut().insert(Arc::clone(this));
			inserted = true;
		});
		inserted
	}
	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a
			// try-lock succeeds, we do consider it to have created lockorder dependencies.
			for locked in held.borrow().iter() {
				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
			}
			held.borrow_mut().insert(Arc::clone(this));
		});
	}
}
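// Worked example of the bookkeeping above (lock names `a` and `b` are hypothetical): if the
// thread-local LOCKS_HELD set contains `a`'s metadata when `b` is locked, `a` is recorded in
// `b.locked_before`. A later attempt to lock `a` while holding `b` finds `a` in `b`'s
// locked-before set and panics, flagging the lockorder inversion before it can deadlock.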
pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}
impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}
impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps);
		});
	}
}
impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}
impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			// A successful try-lock still records lockorder dependencies.
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	first_lock: bool,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}
impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		if !self.first_lock {
			// Note that it's not strictly true that the first taken read lock will get
			// unlocked last, but in practice our locks are always taken as RAII, so it
			// should basically always be true.
			return;
		}
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps);
		});
	}
}
impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}
impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		let first_lock = LockMetadata::pre_read_lock(&self.deps);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}
#[test]
#[should_panic]
fn recursive_lock_fail() {
	let mutex = Mutex::new(());
	let _a = mutex.lock().unwrap();
	let _b = mutex.lock().unwrap();
}

#[test]
fn recursive_read() {
	let lock = RwLock::new(());
	let _a = lock.read().unwrap();
	let _b = lock.read().unwrap();
}

#[test]
#[should_panic]
fn lockorder_fail() {
	let a = Mutex::new(());
	let b = Mutex::new(());
	{
		let _a = a.lock().unwrap();
		let _b = b.lock().unwrap();
	}
	{
		let _b = b.lock().unwrap();
		let _a = a.lock().unwrap();
	}
}

#[test]
#[should_panic]
fn write_lockorder_fail() {
	let a = RwLock::new(());
	let b = RwLock::new(());
	{
		let _a = a.write().unwrap();
		let _b = b.write().unwrap();
	}
	{
		let _b = b.write().unwrap();
		let _a = a.write().unwrap();
	}
}

#[test]
#[should_panic]
fn read_lockorder_fail() {
	let a = RwLock::new(());
	let b = RwLock::new(());
	{
		let _a = a.read().unwrap();
		let _b = b.read().unwrap();
	}
	{
		let _b = b.read().unwrap();
		let _a = a.read().unwrap();
	}
}

#[test]
fn read_recursive_no_lockorder() {
	// Like the above, but note that no lockorder is implied when we recursively read-lock a
	// RwLock, causing this to pass just fine.
	let a = RwLock::new(());
	let b = RwLock::new(());
	let _outer = a.read().unwrap();
	{
		let _a = a.read().unwrap();
		let _b = b.read().unwrap();
	}
	{
		let _b = b.read().unwrap();
		let _a = a.read().unwrap();
	}
}

#[test]
#[should_panic]
fn read_write_lockorder_fail() {
	let a = RwLock::new(());
	let b = RwLock::new(());
	{
		let _a = a.write().unwrap();
		let _b = b.read().unwrap();
	}
	{
		let _b = b.read().unwrap();
		let _a = a.write().unwrap();
	}
}
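// The two tests below are illustrative sketches (added for exposition; names and scenarios
// are new, not part of the suite above): the first exercises the documented try-lock
// behavior, the second shows basic Condvar usage against the wrapped Mutex.
#[test]
#[should_panic]
fn try_lock_establishes_lockorder() {
	let a = Mutex::new(());
	let b = Mutex::new(());
	{
		// Holding `a` while successfully try-locking `b` records `a` before `b`.
		let _a = a.lock().unwrap();
		let _b = b.try_lock().unwrap();
	}
	{
		// Locking in the opposite order now trips the recorded dependency.
		let _b = b.lock().unwrap();
		let _a = a.lock().unwrap();
	}
}

#[test]
fn condvar_wait_timeout_rewraps_guard() {
	let mutex = Mutex::new(());
	let condvar = Condvar::new();
	let guard = mutex.lock().unwrap();
	// With no notifier, this simply times out and hands back a tracked guard.
	let (_guard, _) = condvar.wait_timeout(guard, Duration::from_millis(1)).unwrap();
}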
// In this lockorder-tracking build, fairness is not modeled; the checked RwLock stands in
// for the fair variant.
pub type FairRwLock<T> = RwLock<T>;