[rust-lightning] / lightning / src / debug_sync.rs
pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

use crate::prelude::HashMap;

#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace {}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

pub struct Condvar {
        inner: StdCondvar,
}

impl Condvar {
        pub fn new() -> Condvar {
                Condvar { inner: StdCondvar::new() }
        }

        pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
                let mutex: &'a Mutex<T> = guard.mutex;
                self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
        }

        #[allow(unused)]
        pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
                let mutex = guard.mutex;
                self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
        }

        pub fn notify_all(&self) { self.inner.notify_all(); }
}
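
// Illustrative only, not part of the original file: a minimal sketch of how the `Condvar`
// wrapper above pairs with the debug `Mutex` defined later in this file. `wait` consumes the
// debug guard, blocks on the inner `StdCondvar`, and hands back a fresh debug guard for the
// same mutex; `wait_timeout` does the same but discards the std `WaitTimeoutResult`, returning
// `()` in its place. The function and parameter names below are hypothetical.
#[cfg(test)]
#[allow(dead_code)]
fn condvar_usage_sketch(flag: &Mutex<bool>, cv: &Condvar) {
        let mut guard = flag.lock().unwrap();
        while !*guard {
                guard = cv.wait(guard).unwrap();
        }
        // A producer thread would set `*guard = true` under the lock and then call `cv.notify_all()`.
}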

thread_local! {
        /// The set of locks currently held on this thread, keyed by each lock's index and mapped to a
        /// reference to its `LockMetadata`
        static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock: its index, the set of locks taken before it (its locked-before
/// set), and the backtrace captured when the lock itself was constructed.
struct LockMetadata {
        lock_idx: u64,
        locked_before: StdMutex<HashMap<u64, LockDep>>,
        _lock_construction_bt: Backtrace,
}

struct LockDep {
        lock: Arc<LockMetadata>,
        /// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
        _lockdep_trace: Backtrace,
}

#[cfg(feature = "backtrace")]
fn get_construction_location(backtrace: &Backtrace) -> String {
        // Find the first frame that is after `debug_sync` (or that is in our tests) and use
        // that as the mutex construction site. Note that the first few frames may be in
        // the `backtrace` crate, so we have to ignore those.
        let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
        let mut found_debug_sync = false;
        for frame in backtrace.frames() {
                for symbol in frame.symbols() {
                        let symbol_name = symbol.name().unwrap().as_str().unwrap();
                        if !sync_mutex_constr_regex.is_match(symbol_name) {
                                if found_debug_sync {
                                        if let Some(col) = symbol.colno() {
                                                return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
                                        } else {
                                                // Windows debug symbols don't support column numbers, so fall back to
                                                // line numbers only if no `colno` is available
                                                return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
                                        }
                                }
                        } else { found_debug_sync = true; }
                }
        }
        panic!("Couldn't find mutex construction callsite");
}
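
// Illustrative note, not part of the original file: with `feature = "backtrace"`, the string
// returned above is a file:line(:column) location such as `lightning/src/ln/channelmanager.rs:1234:5`
// (hypothetical path). `LockMetadata::new` below uses it as the key into the global `LOCKS` map, so
// every lock constructed at the same call site shares a single `LockMetadata` instance.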

impl LockMetadata {
        fn new() -> Arc<LockMetadata> {
                let backtrace = Backtrace::new();
                let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

                let res = Arc::new(LockMetadata {
                        locked_before: StdMutex::new(HashMap::new()),
                        lock_idx,
                        _lock_construction_bt: backtrace,
                });

                #[cfg(feature = "backtrace")]
                {
                        let lock_constr_location = get_construction_location(&res._lock_construction_bt);
                        LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
                        let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
                        match locks.entry(lock_constr_location) {
                                hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
                                hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
                        }
                }
                res
        }

        // Returns whether this was the first acquisition of this lock on this thread, i.e. false if
        // it was already held as a (recursive) read lock. Only relevant for read locks.
        fn _pre_lock(this: &Arc<LockMetadata>, read: bool) -> bool {
                let mut inserted = false;
                LOCKS_HELD.with(|held| {
                        // For each lock which is currently locked, check that no lock's locked-before
                        // set includes the lock we're about to lock, which would imply a lockorder
                        // inversion.
                        for (locked_idx, _locked) in held.borrow().iter() {
                                if read && *locked_idx == this.lock_idx {
                                        // Recursive read locks are explicitly allowed
                                        return;
                                }
                        }
                        for (locked_idx, locked) in held.borrow().iter() {
                                if !read && *locked_idx == this.lock_idx {
                                        // With `feature = "backtrace"` set, we may be looking at different instances
                                        // of the same lock.
                                        debug_assert!(cfg!(feature = "backtrace"), "Tried to acquire a lock while it was held!");
                                }
                                for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
                                        if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
                                                #[cfg(feature = "backtrace")]
                                                panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
                                                        get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
                                                        get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
                                                        _locked_dep._lockdep_trace);
                                                #[cfg(not(feature = "backtrace"))]
                                                panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
                                        }
                                }
                                // Insert any already-held locks in our locked-before set.
                                let mut locked_before = this.locked_before.lock().unwrap();
                                if !locked_before.contains_key(&locked.lock_idx) {
                                        let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
                                        locked_before.insert(lockdep.lock.lock_idx, lockdep);
                                }
                        }
                        held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
                        inserted = true;
                });
                inserted
        }

        fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
        fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }

        fn try_locked(this: &Arc<LockMetadata>) {
                LOCKS_HELD.with(|held| {
                        // Since a try-lock will simply fail if the lock is held already, we do not
                        // consider try-locks to ever generate lockorder inversions. However, if a try-lock
                        // succeeds, we do consider it to have created lockorder dependencies.
                        let mut locked_before = this.locked_before.lock().unwrap();
                        for (locked_idx, locked) in held.borrow().iter() {
                                if !locked_before.contains_key(locked_idx) {
                                        let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
                                        locked_before.insert(*locked_idx, lockdep);
                                }
                        }
                        held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
                });
        }
}
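
// Illustrative sketch, not part of the original file: after taking `a` and then `b`, `b`'s
// `locked_before` set records a dependency on `a`. That recorded dependency is what lets
// `_pre_lock` above flag the reversed order (`b` then `a`) as a lockorder inversion.
#[cfg(test)]
#[allow(dead_code)]
fn lockorder_tracking_sketch() {
        let a = Mutex::new(());
        let b = Mutex::new(());
        {
                let _a = a.lock().unwrap();
                let _b = b.lock().unwrap();
        }
        // `b` now remembers that `a` was held when it was locked.
        assert!(b.deps.locked_before.lock().unwrap().contains_key(&a.deps.lock_idx));
}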

pub struct Mutex<T: Sized> {
        inner: StdMutex<T>,
        deps: Arc<LockMetadata>,
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
        mutex: &'a Mutex<T>,
        lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
        fn into_inner(self) -> StdMutexGuard<'a, T> {
                // We cannot simply move `self.lock` out of `self` because `MutexGuard` implements `Drop`,
                // and moving a field out of a type with a `Drop` impl is an error (E0509). Instead, copy
                // the inner guard out and `forget` `self` so its `Drop` never runs.
                unsafe {
                        let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
                        std::mem::forget(self);
                        v
                }
        }
}

impl<T: Sized> Drop for MutexGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.mutex.deps.lock_idx);
                });
        }
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.lock.deref()
        }
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
                self.lock.deref_mut()
        }
}

impl<T> Mutex<T> {
        pub fn new(inner: T) -> Mutex<T> {
                Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
        }

        pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
                LockMetadata::pre_lock(&self.deps);
                self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
        }

        pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
                let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
                if res.is_ok() {
                        LockMetadata::try_locked(&self.deps);
                }
                res
        }
}
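
// Illustrative sketch, not part of the original file: `try_lock` deliberately skips the
// `pre_lock` inversion check. A try-lock that fails just returns `Err(())`, and only a
// successful one records lockorder dependencies via `LockMetadata::try_locked`, so a try-lock
// can never itself trigger a lockorder panic.
#[cfg(test)]
#[allow(dead_code)]
fn try_lock_sketch() {
        let m = Mutex::new(0u8);
        let _held = m.lock().unwrap();
        // The inner std mutex is already locked, so this returns Err(()) rather than panicking.
        assert!(m.try_lock().is_err());
}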

pub struct RwLock<T: Sized> {
        inner: StdRwLock<T>,
        deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
        lock: &'a RwLock<T>,
        first_lock: bool,
        guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
        lock: &'a RwLock<T>,
        guard: StdRwLockWriteGuard<'a, T>,
}

impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.guard.deref()
        }
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
        fn drop(&mut self) {
                if !self.first_lock {
                        // Note that it's not strictly true that the first read lock taken will be the last
                        // one released, but in practice our locks are always held as RAII guards, so this
                        // should basically always be the case.
                        return;
                }
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.lock.deps.lock_idx);
                });
        }
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
        type Target = T;

        fn deref(&self) -> &T {
                &self.guard.deref()
        }
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
        fn drop(&mut self) {
                LOCKS_HELD.with(|held| {
                        held.borrow_mut().remove(&self.lock.deps.lock_idx);
                });
        }
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
        fn deref_mut(&mut self) -> &mut T {
                self.guard.deref_mut()
        }
}

impl<T> RwLock<T> {
        pub fn new(inner: T) -> RwLock<T> {
                RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
        }

        pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
                let first_lock = LockMetadata::pre_read_lock(&self.deps);
                self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard, first_lock }).map_err(|_| ())
        }

        pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
                LockMetadata::pre_lock(&self.deps);
                self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
        }

        pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
                let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
                if res.is_ok() {
                        LockMetadata::try_locked(&self.deps);
                }
                res
        }
}

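// In this debug implementation there is no separate fair lock; `FairRwLock` is simply an alias
// for the lockorder-checked `RwLock` above.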
pub type FairRwLock<T> = RwLock<T>;

mod tests {
        use super::{RwLock, Mutex};

        #[test]
        #[should_panic]
        #[cfg(not(feature = "backtrace"))]
        fn recursive_lock_fail() {
                let mutex = Mutex::new(());
                let _a = mutex.lock().unwrap();
                let _b = mutex.lock().unwrap();
        }

        #[test]
        fn recursive_read() {
                let lock = RwLock::new(());
                let _a = lock.read().unwrap();
                let _b = lock.read().unwrap();
        }

        #[test]
        #[should_panic]
        fn lockorder_fail() {
                let a = Mutex::new(());
                let b = Mutex::new(());
                {
                        let _a = a.lock().unwrap();
                        let _b = b.lock().unwrap();
                }
                {
                        let _b = b.lock().unwrap();
                        let _a = a.lock().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn write_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.write().unwrap();
                        let _b = b.write().unwrap();
                }
                {
                        let _b = b.write().unwrap();
                        let _a = a.write().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn read_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.read().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.read().unwrap();
                }
        }

        #[test]
        fn read_recursive_no_lockorder() {
                // Like the above, but note that no lockorder is implied when we recursively read-lock a
                // RwLock, causing this to pass just fine.
                let a = RwLock::new(());
                let b = RwLock::new(());
                let _outer = a.read().unwrap();
                {
                        let _a = a.read().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.read().unwrap();
                }
        }

        #[test]
        #[should_panic]
        fn read_write_lockorder_fail() {
                let a = RwLock::new(());
                let b = RwLock::new(());
                {
                        let _a = a.write().unwrap();
                        let _b = b.read().unwrap();
                }
                {
                        let _b = b.read().unwrap();
                        let _a = a.write().unwrap();
                }
        }
}