pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

use crate::prelude::HashMap;

#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

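/// A `Condvar` wrapper around `std::sync::Condvar`. Waiting hands the inner `StdMutexGuard` to
/// the std condvar and then re-wraps the returned guard in our `MutexGuard`, so the thread-local
/// held-lock tracking stays consistent across the wait.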
pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
	}

	#[allow(unused)]
	pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
		let mutex = guard.mutex;
		self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}

thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`
	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
}
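/// Each lock is assigned a unique index from this counter when its `LockMetadata` is created.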
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
/// when the Mutex itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashMap<u64, LockDep>>,
	_lock_construction_bt: Backtrace,
}

struct LockDep {
	lock: Arc<LockMetadata>,
	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
	_lockdep_trace: Backtrace,
}

#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> String {
	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
	// that as the mutex construction site. Note that the first few frames may be in
	// the `backtrace` crate, so we have to ignore those.
	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
	let mut found_debug_sync = false;
	for frame in backtrace.frames() {
		for symbol in frame.symbols() {
			let symbol_name = symbol.name().unwrap().as_str().unwrap();
			if !sync_mutex_constr_regex.is_match(symbol_name) {
				if found_debug_sync {
					let res = if let Some(col) = symbol.colno() {
						format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col)
					} else {
						// Windows debug symbols don't support column numbers, so fall back to
						// line numbers only if no `colno` is available
						format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap())
					};
					eprintln!("{}: {}", res, symbol_name);
					return res;
				}
			} else { found_debug_sync = true; }
		}
	}
	panic!("Couldn't find mutex construction callsite");
}

impl LockMetadata {
	fn new() -> Arc<LockMetadata> {
		let backtrace = Backtrace::new();
		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

		let res = Arc::new(LockMetadata {
			locked_before: StdMutex::new(HashMap::new()),
			lock_idx,
			_lock_construction_bt: backtrace,
		});

		#[cfg(feature = "backtrace")]
		{
			let lock_constr_location = get_construction_location(&res._lock_construction_bt);
			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
			match locks.entry(lock_constr_location) {
				hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
			}
		}
		res
	}

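	// Core of the lockorder check: before taking a lock, walk the locks currently held by
	// this thread. If the lock being taken already appears in a held lock's `locked_before`
	// set, the two locks have previously been taken in the opposite order (a lockorder
	// inversion) and we panic. Otherwise each held lock is added to the new lock's
	// `locked_before` set and the new lock is marked as held.
	//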
	// Returns whether we were a recursive lock (only relevant for read)
	fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
		let mut inserted = false;
		LOCKS_HELD.with(|held| {
			// For each lock which is currently locked, check that no lock's locked-before
			// set includes the lock we're about to lock, which would imply a lockorder
			// inversion.
			for (locked_idx, _locked) in held.borrow().iter() {
				if read && *locked_idx == this.lock_idx {
					// Recursive read locks are explicitly allowed
					return;
				}
			}
			for (locked_idx, locked) in held.borrow().iter() {
				if !read && *locked_idx == this.lock_idx {
					// With `feature = "backtrace"` set, we may be looking at different instances
					// of the same lock.
					debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
				}
				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
						#[cfg(feature = "backtrace")]
						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
							get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
							get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
							_locked_dep._lockdep_trace);
						#[cfg(not(feature = "backtrace"))]
						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
					}
				}
				// Insert any already-held locks in our locked-before set.
				let mut locked_before = this.locked_before.lock().unwrap();
				if !locked_before.contains_key(&locked.lock_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(lockdep.lock.lock_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
			inserted = true;
		});
		inserted
	}

	fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
	fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }

	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
			// succeeds, we do consider it to have created lockorder dependencies.
			let mut locked_before = this.locked_before.lock().unwrap();
			for (locked_idx, locked) in held.borrow().iter() {
				if !locked_before.contains_key(locked_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(*locked_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
}

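/// A Mutex which, in addition to wrapping an `StdMutex`, carries an `Arc<LockMetadata>` that is
/// used to record and check lockorder dependencies on every `lock`/`try_lock`.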
pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}

impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}

impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}

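/// An RwLock wrapping an `StdRwLock` plus the same `LockMetadata` tracking used by `Mutex`.
/// Because recursive read locks are allowed, only the outermost read guard on a given lock
/// (tracked via `first_lock`) owns the thread-local held-lock entry.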
pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	first_lock: bool,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}

impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		if !self.first_lock {
			// Note that it's not strictly true that the first taken read lock will get unlocked
			// last, but in practice our locks are always taken as RAII, so it should basically
			// always be true.
			return;
		}
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}

impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		let first_lock = LockMetadata::pre_read_lock(&self.deps);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}

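/// In this debug build code which asks for a `FairRwLock` simply gets the lockorder-checking
/// `RwLock`; no fairness guarantees are provided here.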
pub type FairRwLock<T> = RwLock<T>;

mod tests {
	use super::{RwLock, Mutex};

	#[test]
	#[should_panic]
	#[cfg(not(feature = "backtrace"))]
	fn recursive_lock_fail() {
		let mutex = Mutex::new(());
		let _a = mutex.lock().unwrap();
		let _b = mutex.lock().unwrap();
	}

	#[test]
	fn recursive_read() {
		let lock = RwLock::new(());
		let _a = lock.read().unwrap();
		let _b = lock.read().unwrap();
	}

	#[test]
	#[should_panic]
	fn lockorder_fail() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap();
		}
		{
			let _b = b.lock().unwrap();
			let _a = a.lock().unwrap();
		}
	}
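
	// An illustrative addition (not from the upstream test suite): a successful try_lock
	// records lockorder dependencies just like a regular lock, so taking the locks in the
	// opposite order afterwards should still trip the checker.
	#[test]
	#[should_panic]
	fn try_lockorder_fail() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			let _a = a.lock().unwrap();
			let _b = b.try_lock().unwrap();
		}
		{
			let _b = b.lock().unwrap();
			let _a = a.lock().unwrap();
		}
	}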

	#[test]
	#[should_panic]
	fn write_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.write().unwrap();
			let _b = b.write().unwrap();
		}
		{
			let _b = b.write().unwrap();
			let _a = a.write().unwrap();
		}
	}

	#[test]
	#[should_panic]
	fn read_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.read().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.read().unwrap();
		}
	}

	#[test]
	fn read_recursive_no_lockorder() {
		// Like the above, but note that no lockorder is implied when we recursively read-lock a
		// RwLock, causing this to pass just fine.
		let a = RwLock::new(());
		let b = RwLock::new(());
		let _outer = a.read().unwrap();
		{
			let _a = a.read().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.read().unwrap();
		}
	}

	#[test]
	#[should_panic]
	fn read_write_lockorder_fail() {
		let a = RwLock::new(());
		let b = RwLock::new(());
		{
			let _a = a.write().unwrap();
			let _b = b.read().unwrap();
		}
		{
			let _b = b.read().unwrap();
			let _a = a.write().unwrap();
		}
	}
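
	// An illustrative addition (not from the upstream test suite): Condvar::wait_timeout hands
	// the inner guard to the std Condvar and rebuilds our MutexGuard, so the lock remains
	// usable (and tracked) after the wait returns.
	#[test]
	fn condvar_rebuilds_guard() {
		use super::Condvar;
		use core::time::Duration;

		let mutex = Mutex::new(0u8);
		let cv = Condvar::new();
		let guard = mutex.lock().unwrap();
		// No-one notifies us, so this simply times out and returns the (rebuilt) guard.
		let (mut guard, _) = cv.wait_timeout(guard, Duration::from_millis(1)).unwrap();
		*guard = 1;
	}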
}