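//! Lockorder-checking wrappers around the std sync primitives, used in place of the plain
//! `std::sync` types when testing for lockorder violations. Each lock carries a `LockMetadata`
//! which records the set of locks held by the current thread at acquisition time; acquiring locks
//! in an order which inverts a previously-recorded order results in a panic, with construction
//! and acquisition backtraces included when built with the `backtrace` feature.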
pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
use core::time::Duration;

use std::cell::RefCell;

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex as StdMutex;
use std::sync::MutexGuard as StdMutexGuard;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::sync::Condvar as StdCondvar;

pub use std::sync::WaitTimeoutResult;

use crate::prelude::HashMap;

use super::{LockTestExt, LockHeldState};

#[cfg(feature = "backtrace")]
use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};

#[cfg(not(feature = "backtrace"))]
struct Backtrace{}
#[cfg(not(feature = "backtrace"))]
impl Backtrace { fn new() -> Backtrace { Backtrace {} } }

pub type LockResult<Guard> = Result<Guard, ()>;

pub struct Condvar {
	inner: StdCondvar,
}

impl Condvar {
	pub fn new() -> Condvar {
		Condvar { inner: StdCondvar::new() }
	}

	pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, condition: F)
	-> LockResult<MutexGuard<'a, T>> {
		let mutex: &'a Mutex<T> = guard.mutex;
		self.inner.wait_while(guard.into_inner(), condition).map(|lock| MutexGuard { mutex, lock })
			.map_err(|_| ())
	}

	#[allow(unused)]
	pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F)
	-> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
		let mutex = guard.mutex;
		self.inner.wait_timeout_while(guard.into_inner(), dur, condition).map_err(|_| ())
			.map(|(lock, e)| (MutexGuard { mutex, lock }, e))
	}

	pub fn notify_all(&self) { self.inner.notify_all(); }
}

thread_local! {
	/// We track the set of locks currently held by a reference to their `LockMetadata`
	static LOCKS_HELD: RefCell<HashMap<u64, Arc<LockMetadata>>> = RefCell::new(HashMap::new());
}
static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "backtrace")]
static mut LOCKS: Option<StdMutex<HashMap<String, Arc<LockMetadata>>>> = None;
#[cfg(feature = "backtrace")]
static LOCKS_INIT: Once = Once::new();

/// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
/// when the Mutex itself was constructed.
struct LockMetadata {
	lock_idx: u64,
	locked_before: StdMutex<HashMap<u64, LockDep>>,
	_lock_construction_bt: Backtrace,
}

struct LockDep {
	lock: Arc<LockMetadata>,
	/// lockdep_trace is unused unless we're building with `backtrace`, so we mark it _
	_lockdep_trace: Backtrace,
}

// Locates the frame preceding the earliest `debug_sync` frame in the call stack. This ensures we
// can properly detect a lock's construction and acquiral callsites, since the latter may contain
// multiple `debug_sync` frames.
#[cfg(feature = "backtrace")]
fn locate_call_symbol(backtrace: &Backtrace) -> (String, Option<u32>) {
	// Find the earliest `debug_sync` frame (or that is in our tests) and use the frame preceding it
	// as the callsite. Note that the first few frames may be in the `backtrace` crate, so we have
	// to ignore those.
	let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
	let mut found_debug_sync = false;
	let mut symbol_after_latest_debug_sync = None;
	for frame in backtrace.frames().iter() {
		for symbol in frame.symbols().iter() {
			if let Some(symbol_name) = symbol.name().map(|name| name.as_str()).flatten() {
				if !sync_mutex_constr_regex.is_match(symbol_name) {
					if found_debug_sync {
						symbol_after_latest_debug_sync = Some(symbol);
						found_debug_sync = false;
					}
				} else { found_debug_sync = true; }
			}
		}
	}
	let symbol = symbol_after_latest_debug_sync.expect("Couldn't find lock call symbol");
	(format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno())
}

impl LockMetadata {
	fn new() -> Arc<LockMetadata> {
		let backtrace = Backtrace::new();
		let lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64;

		let res = Arc::new(LockMetadata {
			locked_before: StdMutex::new(HashMap::new()),
			lock_idx,
			_lock_construction_bt: backtrace,
		});

		#[cfg(feature = "backtrace")]
		{
			let (lock_constr_location, lock_constr_colno) =
				locate_call_symbol(&res._lock_construction_bt);
			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
			match locks.entry(lock_constr_location) {
				hash_map::Entry::Occupied(e) => {
					assert_eq!(lock_constr_colno,
						locate_call_symbol(&e.get()._lock_construction_bt).1,
						"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
					return Arc::clone(e.get())
				},
				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
			}
		}
		res
	}

	fn pre_lock(this: &Arc<LockMetadata>, _double_lock_self_allowed: bool) {
		LOCKS_HELD.with(|held| {
			// For each lock that is currently held, check that no lock's `locked_before` set
			// includes the lock we're about to hold, which would imply a lockorder inversion.
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					// Note that with `feature = "backtrace"` set, we may be looking at different
					// instances of the same lock. Still, doing so is quite risky, a total order
					// must be maintained, and doing so across a set of otherwise-identical mutexes
					// is fraught with issues.
					#[cfg(feature = "backtrace")]
					debug_assert!(_double_lock_self_allowed,
						"Tried to acquire a lock while it was held!\nLock constructed at {}",
						locate_call_symbol(&this._lock_construction_bt).0);
					#[cfg(not(feature = "backtrace"))]
					panic!("Tried to acquire a lock while it was held!");
				}
			}
			for (_locked_idx, locked) in held.borrow().iter() {
				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
					let is_dep_this_lock = *locked_dep_idx == this.lock_idx;
					let has_same_construction = *locked_dep_idx == locked.lock_idx;
					if is_dep_this_lock && !has_same_construction {
						#[allow(unused_mut, unused_assignments)]
						let mut has_same_callsite = false;
						#[cfg(feature = "backtrace")] {
							has_same_callsite = _double_lock_self_allowed &&
								locate_call_symbol(&_locked_dep._lockdep_trace) ==
									locate_call_symbol(&Backtrace::new());
						}
						if !has_same_callsite {
							#[cfg(feature = "backtrace")]
							panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
								locate_call_symbol(&this._lock_construction_bt).0,
								this.lock_idx, this._lock_construction_bt,
								locate_call_symbol(&locked._lock_construction_bt).0,
								locked.lock_idx, locked._lock_construction_bt,
								_locked_dep._lockdep_trace);
							#[cfg(not(feature = "backtrace"))]
							panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
						}
					}
				}
				// Insert any already-held locks in our locked-before set.
				let mut locked_before = this.locked_before.lock().unwrap();
				if !locked_before.contains_key(&locked.lock_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(lockdep.lock.lock_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}

	fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
		let mut res = LockHeldState::NotHeldByThread;
		LOCKS_HELD.with(|held| {
			for (locked_idx, _locked) in held.borrow().iter() {
				if *locked_idx == this.lock_idx {
					res = LockHeldState::HeldByThread;
				}
			}
		});
		res
	}

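	/// Records lockorder dependencies for a lock which was successfully acquired via a try-lock.
	/// Try-locks cannot deadlock (they simply fail if the lock is already held), so no inversion
	/// check is performed here, only the dependency bookkeeping.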
	fn try_locked(this: &Arc<LockMetadata>) {
		LOCKS_HELD.with(|held| {
			// Since a try-lock will simply fail if the lock is held already, we do not
			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
			// succeeds, we do consider it to have created lockorder dependencies.
			let mut locked_before = this.locked_before.lock().unwrap();
			for (locked_idx, locked) in held.borrow().iter() {
				if !locked_before.contains_key(locked_idx) {
					let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() };
					locked_before.insert(*locked_idx, lockdep);
				}
			}
			held.borrow_mut().insert(this.lock_idx, Arc::clone(this));
		});
	}
}

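/// A `std::sync::Mutex` wrapper which registers each acquisition with the thread-local lockorder
/// tracker above, panicking on lockorder inversions and on re-entrant locking.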
pub struct Mutex<T: Sized> {
	inner: StdMutex<T>,
	deps: Arc<LockMetadata>,
}
impl<T: Sized> Mutex<T> {
	pub(crate) fn into_inner(self) -> LockResult<T> {
		self.inner.into_inner().map_err(|_| ())
	}
}

#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: Sized + 'a> {
	mutex: &'a Mutex<T>,
	lock: StdMutexGuard<'a, T>,
}

impl<'a, T: Sized> MutexGuard<'a, T> {
	fn into_inner(self) -> StdMutexGuard<'a, T> {
		// Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509.
		unsafe {
			let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock);
			std::mem::forget(self);
			v
		}
	}
}

impl<T: Sized> Drop for MutexGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.mutex.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for MutexGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.lock.deref()
	}
}

impl<T: Sized> DerefMut for MutexGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.lock.deref_mut()
	}
}

impl<T> Mutex<T> {
	pub fn new(inner: T) -> Mutex<T> {
		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
	}

	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ())
	}

	pub fn try_lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
		let res = self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}

impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = MutexGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap()
	}
}

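/// A `std::sync::RwLock` wrapper with the same lockorder tracking as `Mutex` above. Read and
/// write acquisitions are tracked identically, as a recursive read lock can still deadlock if a
/// writer is waiting (see `read` below).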
pub struct RwLock<T: Sized> {
	inner: StdRwLock<T>,
	deps: Arc<LockMetadata>,
}

pub struct RwLockReadGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockReadGuard<'a, T>,
}

pub struct RwLockWriteGuard<'a, T: Sized + 'a> {
	lock: &'a RwLock<T>,
	guard: StdRwLockWriteGuard<'a, T>,
}

impl<T: Sized> Deref for RwLockReadGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockReadGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> Deref for RwLockWriteGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		&self.guard.deref()
	}
}

impl<T: Sized> Drop for RwLockWriteGuard<'_, T> {
	fn drop(&mut self) {
		LOCKS_HELD.with(|held| {
			held.borrow_mut().remove(&self.lock.deps.lock_idx);
		});
	}
}

impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		self.guard.deref_mut()
	}
}

impl<T> RwLock<T> {
	pub fn new(inner: T) -> RwLock<T> {
		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
	}

	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
		// Note that while we could be taking a recursive read lock here, Rust's `RwLock` may
		// deadlock trying to take a second read lock if another thread is waiting on the write
		// lock. This behavior is platform dependent, but our in-tree `FairRwLock` guarantees
		// such a deadlock.
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.read().map(|guard| RwLockReadGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		LockMetadata::pre_lock(&self.deps, false);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ())
	}

	pub fn try_write<'a>(&'a self) -> LockResult<RwLockWriteGuard<'a, T>> {
		let res = self.inner.try_write().map(|guard| RwLockWriteGuard { lock: self, guard }).map_err(|_| ());
		if res.is_ok() {
			LockMetadata::try_locked(&self.deps);
		}
		res
	}
}

impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		LockMetadata::held_by_thread(&self.deps)
	}
	type ExclLock = RwLockWriteGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
		LockMetadata::pre_lock(&self.deps, true);
		self.inner.write().map(|guard| RwLockWriteGuard { lock: self, guard }).unwrap()
	}
}

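/// In this lockorder-checking build the fair RwLock is simply the lockorder-checked `RwLock`
/// above.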
pub type FairRwLock<T> = RwLock<T>;
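
// The module below is an illustrative sketch (not part of the upstream file) showing the behavior
// the lockorder tracking above is intended to provide: taking two locks as `a` then `b` on one
// code path records that order, and later taking them as `b` then `a` is detected as an inversion
// and panics in `LockMetadata::pre_lock`. The module and test names are hypothetical.
#[cfg(test)]
mod lockorder_example {
	use super::Mutex;

	#[test]
	#[should_panic]
	fn detects_simple_lockorder_inversion() {
		let a = Mutex::new(());
		let b = Mutex::new(());
		{
			// Establish the order a -> b, then drop both guards.
			let _a = a.lock().unwrap();
			let _b = b.lock().unwrap();
		}
		{
			// Taking the locks in the opposite order inverts the recorded order and panics.
			let _b = b.lock().unwrap();
			let _a = a.lock().unwrap();
		}
	}
}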