[rust-lightning] / lightning / src / debug_sync.rs
pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::collections::HashSet;
use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

#[cfg(feature = "backtrace")]
use {prelude::{HashMap, hash_map}, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

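/// A [`std::sync::Condvar`] wrapper which takes and returns our lockorder-tracking
/// [`MutexGuard`] rather than the std one, rebuilding the guard on wake so the associated
/// [`Mutex`] keeps being tracked.
///
/// A minimal usage sketch (illustrative only, not taken from this module's callers):
/// ```ignore
/// let lock = Mutex::new(false);
/// let cvar = Condvar::new();
/// let mut started = lock.lock().unwrap();
/// while !*started {
///     started = cvar.wait(started).unwrap();
/// }
/// // Another thread sets the flag under `lock` and then calls `cvar.notify_all()`.
/// ```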
pub struct Condvar {
        inner: StdCondvar,
}

impl Condvar {
        pub fn new() -> Condvar {
                Condvar { inner: StdCondvar::new() }
        }

        pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
                let mutex: &'a Mutex<T> = guard.mutex;
                self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
        }

        #[allow(unused)]
        pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
                let mutex = guard.mutex;
                self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
        }

        pub fn notify_all(&self) { self.inner.notify_all(); }
}

thread_local! {
        /// We track the set of locks currently held by a reference to their `LockMetadata`
        static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock: its id, the set of things locked before it, and the backtrace
/// of when the Mutex itself was constructed.
struct LockMetadata {
        lock_idx: u64,
        locked_before: StdMutex<HashSet<LockDep>>,
        _lock_construction_bt: Backtrace,
}
impl PartialEq for LockMetadata {
        fn eq(&self, o: &LockMetadata) -> bool { self.lock_idx == o.lock_idx }
}
impl Eq for LockMetadata {}
impl std::hash::Hash for LockMetadata {
        fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
}

struct LockDep {
        lock: Arc<LockMetadata>,
        lockdep_trace: Option<Backtrace>,
}
impl LockDep {
        /// Note that `Backtrace::new()` is rather expensive, so we rely on the caller to fill in
        /// the `lockdep_trace` field after ensuring we actually need it.
        fn new_without_bt(lock: &Arc<LockMetadata>) -> Self {
                Self { lock: Arc::clone(lock), lockdep_trace: None }
        }
}
impl PartialEq for LockDep {
        fn eq(&self, o: &LockDep) -> bool { self.lock.lock_idx == o.lock.lock_idx }
}
impl Eq for LockDep {}
impl std::hash::Hash for LockDep {
        fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock.lock_idx); }
}

#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> String {
        // Find the first frame that is after `debug_sync` (or that is in our tests) and use
        // that as the mutex construction site. Note that the first few frames may be in
        // the `backtrace` crate, so we have to ignore those.
        let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
        let mut found_debug_sync = false;
        for frame in backtrace.frames() {
                for symbol in frame.symbols() {
                        let symbol_name = symbol.name().unwrap().as_str().unwrap();
                        if !sync_mutex_constr_regex.is_match(symbol_name) {
                                if found_debug_sync {
                                        if let Some(col) = symbol.colno() {
                                                return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
                                        } else {
                                                // Windows debug symbols don't support column numbers, so fall back to
                                                // line numbers only if no `colno` is available
                                                return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
                                        }
                                }
                        } else { found_debug_sync = true; }
                }
        }
        panic!("Couldn't find mutex construction callsite");
}

impl LockMetadata {
        fn new() -> Arc<LockMetadata> {
                let backtrace = Backtrace::new();
                let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

                let res = Arc::new(LockMetadata {
                        locked_before: StdMutex::new(HashSet::new()),
                        lock_idx,
                        _lock_construction_bt: backtrace,
                });

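                // With the `backtrace` feature, metadata is de-duplicated by construction
                // callsite: if a lock was already constructed at this exact location (e.g. in a
                // loop, or as a field of a struct built more than once), we return the existing
                // `LockMetadata` so lockorder is tracked per callsite rather than per instance.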
                #[cfg(feature = "backtrace")]
                {
                        let lock_constr_location = get_construction_location(&res._lock_construction_bt);
                        LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
                        let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
                        match locks.entry(lock_constr_location) {
                                hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
                                hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
                        }
                }
                res
        }

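        // A worked example of the check below: if some thread takes lock A and then lock B, B's
        // locked-before set gains an entry for A. If any thread later holds B and attempts to
        // take A, A is found in B's locked-before set and we panic, as the two orders taken
        // together could deadlock.
        //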
        // Returns whether the lock was newly inserted into the set of locks held by this thread,
        // i.e. false if this was a recursive acquisition (only relevant for read locks).
        fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
                let mut inserted = false;
                LOCKS_HELD.with(|held| {
                        // For each lock which is currently locked, check that no lock's locked-before
                        // set includes the lock we're about to lock, which would imply a lockorder
                        // inversion.
                        for locked in held.borrow().iter() {
                                if read && *locked == *this {
                                        // Recursive read locks are explicitly allowed
                                        return;
                                }
                        }
                        for locked in held.borrow().iter() {
                                if !read && *locked == *this {
                                        // With `feature = "backtrace"` set, we may be looking at different instances
                                        // of the same lock.
                                        debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
                                }
                                for locked_dep in locked.locked_before.lock().unwrap().iter() {
                                        if locked_dep.lock == *this && locked_dep.lock != *locked {
                                                #[cfg(feature = "backtrace")]
                                                panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
                                                        get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
                                                        get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
                                                        locked_dep.lockdep_trace);
                                                #[cfg(not(feature = "backtrace"))]
                                                panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
                                        }
                                }
                                // Insert any already-held locks in our locked-before set.
                                let mut locked_before = this.locked_before.lock().unwrap();
                                let mut lockdep = LockDep::new_without_bt(locked);
                                if !locked_before.contains(&lockdep) {
                                        lockdep.lockdep_trace = Some(Backtrace::new());
                                        locked_before.insert(lockdep);
                                }
                        }
                        held.borrow_mut().insert(Arc::clone(this));
                        inserted = true;
                });
                inserted
        }

        fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
        fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }

        fn try_locked(this: &Arc<LockMetadata>) {
                LOCKS_HELD.with(|held| {
                        // Since a try-lock will simply fail if the lock is held already, we do not
                        // consider try-locks to ever generate lockorder inversions. However, if a try-lock
                        // succeeds, we do consider it to have created lockorder dependencies.
                        let mut locked_before = this.locked_before.lock().unwrap();
                        for locked in held.borrow().iter() {
                                let mut lockdep = LockDep::new_without_bt(locked);
                                if !locked_before.contains(&lockdep) {
                                        lockdep.lockdep_trace = Some(Backtrace::new());
                                        locked_before.insert(lockdep);
                                }
                        }
                        held.borrow_mut().insert(Arc::clone(this));
                });
        }
}

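/// A wrapper around [`std::sync::Mutex`] which, on each `lock()`, records the set of locks the
/// current thread already holds and panics if the acquisition would invert a previously-observed
/// lock order.
///
/// A minimal usage sketch (illustrative only):
/// ```ignore
/// let a = Mutex::new(0u8);
/// let b = Mutex::new(0u8);
/// let _a = a.lock().unwrap();
/// // Taking `b` while `a` is held records `a` in `b`'s locked-before set; taking the two locks
/// // in the opposite order on any thread later will panic.
/// let _b = b.lock().unwrap();
/// ```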
pub struct Mutex<T: Sized> {
        inner: StdMutex<T>,
        deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
        mutex: &'a Mutex<T>,
        lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
        fn into_inner(self) -> StdMutexGuard<'a, T> {
                // We cannot move `self.lock` out of `self` directly as `MutexGuard` implements
                // `Drop` (doing so gets E0509), so instead we read the field out and forget `self`.
                unsafe {
                        let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
                        std::mem::forget(self);
                        v
                }
        }
}

impl<T: Sized> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.mutex.deps);
                });
        }
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.lock.deref()
        }
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
                self.lock.deref_mut()
        }
}

impl<T> Mutex<T> {
        pub fn new(inner: T) -> Mutex<T> {
                Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
        }

        pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
                LockMetadata::pre_lock(&self.deps);
                self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
        }

        pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
                let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
                if res.is_ok() {
                        LockMetadata::try_locked(&self.deps);
                }
                res
        }
}

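/// A wrapper around [`std::sync::RwLock`] with the same lockorder tracking as [`Mutex`]. Write
/// locks are treated like mutex locks, while recursive read locks on the same thread are
/// explicitly allowed and record no lock order.
///
/// A minimal usage sketch (illustrative only):
/// ```ignore
/// let lock = RwLock::new(());
/// let _outer = lock.read().unwrap();
/// let _inner = lock.read().unwrap(); // recursive read locks are fine
/// ```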
pub struct RwLock<T: Sized> {
        inner: StdRwLock<T>,
        deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
        lock: &'a RwLock<T>,
        first_lock: bool,
        guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
        lock: &'a RwLock<T>,
        guard: StdRwLockWriteGuard<'a, T>,
}

impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.guard.deref()
        }
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
        fn drop(&mut self) {
                if !self.first_lock {
                        // Note that it's not strictly true that the first taken read lock will get unlocked
                        // last, but in practice our locks are always taken as RAII, so it should basically
                        // always be true.
                        return;
                }
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.lock.deps);
                });
        }
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.guard.deref()
        }
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.lock.deps);
                });
        }
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
                self.guard.deref_mut()
        }
}

impl<T> RwLock<T> {
        pub fn new(inner: T) -> RwLock<T> {
                RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
        }

        pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
                let first_lock = LockMetadata::pre_read_lock(&self.deps);
                self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
        }

        pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
                LockMetadata::pre_lock(&self.deps);
                self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
        }

        pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
                let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
                if res.is_ok() {
                        LockMetadata::try_locked(&self.deps);
                }
                res
        }
}

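/// In this lockorder-checking build, `FairRwLock` is simply an alias for the tracked [`RwLock`]
/// above; no additional fairness logic is applied here.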
pub type FairRwLock<T> = RwLock<T>;

mod tests {
        use super::{RwLock, Mutex};

        #[test]
        #[should_panic]
        #[cfg(not(feature = "backtrace"))]
        fn recursive_lock_fail() {
                let mutex = Mutex::new(());
                let _a = mutex.lock().unwrap();
                let _b = mutex.lock().unwrap();
        }

        #[test]
        fn recursive_read() {
                let lock = RwLock::new(());
                let _a = lock.read().unwrap();
                let _b = lock.read().unwrap();
        }

        #[test]
        #[should_panic]
        fn lockorder_fail() {
                let a = Mutex::new(());
                let b = Mutex::new(());
                {
                        let _a = a.lock().unwrap();
                        let _b = b.lock().unwrap();
                }
                {
                        let _b = b.lock().unwrap();
                        let _a = a.lock().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn write_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.write().unwrap();
                        let _b = b.write().unwrap();
                }
                {
                        let _b = b.write().unwrap();
                        let _a = a.write().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn read_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.read().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.read().unwrap();
                }
        }

        #[test]
        fn read_recursive_no_lockorder() {
                // Like the above, but note that no lockorder is implied when we recursively read-lock a
                // RwLock, causing this to pass just fine.
                let a = RwLock::new(());
                let b = RwLock::new(());
                let _outer = a.read().unwrap();
                {
                        let _a = a.read().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.read().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn read_write_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.write().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.write().unwrap();
                }
        }
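
        #[test]
        #[should_panic]
        fn try_lock_creates_lockorder() {
                // Illustrative addition (not part of the upstream test suite): a successful
                // try_lock records lockorder dependencies just like lock(), so taking the locks
                // in the opposite order afterwards still panics.
                let a = Mutex::new(());
                let b = Mutex::new(());
                {
                        let _a = a.lock().unwrap();
                        let _b = b.try_lock().unwrap();
                }
                {
                        let _b = b.lock().unwrap();
                        let _a = a.lock().unwrap();
                }
        }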
}