X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fsync%2Fdebug_sync.rs;h=11824d5bc73182ebf3d77fc53dd7a53b2be59045;hb=0188861585db577723c4adedc43acd0f975944a5;hp=721245811771fa56659aa0e9ba221ab880db9ca2;hpb=f082ad40b5006cce13090dcbac80db8951cff8fe;p=rust-lightning

diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index 72124581..11824d5b 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -75,7 +75,7 @@ struct LockDep {
 }
 
 #[cfg(feature = "backtrace")]
-fn get_construction_location(backtrace: &Backtrace) -> String {
+fn get_construction_location(backtrace: &Backtrace) -> (String, Option<u32>) {
 	// Find the first frame that is after `debug_sync` (or that is in our tests) and use
 	// that as the mutex construction site. Note that the first few frames may be in
 	// the `backtrace` crate, so we have to ignore those.
@@ -86,13 +86,7 @@ fn get_construction_location(backtrace: &Backtrace) -> String {
 			let symbol_name = symbol.name().unwrap().as_str().unwrap();
 			if !sync_mutex_constr_regex.is_match(symbol_name) {
 				if found_debug_sync {
-					if let Some(col) = symbol.colno() {
-						return format!("{}:{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap(), col);
-					} else {
-						// Windows debug symbols don't support column numbers, so fall back to
-						// line numbers only if no `colno` is available
-						return format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap());
-					}
+					return (format!("{}:{}", symbol.filename().unwrap().display(), symbol.lineno().unwrap()), symbol.colno());
 				}
 			} else { found_debug_sync = true; }
 		}
@@ -113,11 +107,17 @@ impl LockMetadata {
 
 		#[cfg(feature = "backtrace")]
 		{
-			let lock_constr_location = get_construction_location(&res._lock_construction_bt);
+			let (lock_constr_location, lock_constr_colno) =
+				get_construction_location(&res._lock_construction_bt);
 			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
 			let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap();
 			match locks.entry(lock_constr_location) {
-				hash_map::Entry::Occupied(e) => return Arc::clone(e.get()),
+				hash_map::Entry::Occupied(e) => {
+					assert_eq!(lock_constr_colno,
+						get_construction_location(&e.get()._lock_construction_bt).1,
+						"Because Windows doesn't support column number results in backtraces, we cannot construct two mutexes on the same line or we risk lockorder detection false positives.");
+					return Arc::clone(e.get())
+				},
 				hash_map::Entry::Vacant(e) => { e.insert(Arc::clone(&res)); },
 			}
 		}
@@ -129,7 +129,7 @@ impl LockMetadata {
 			// For each lock which is currently locked, check that no lock's locked-before
 			// set includes the lock we're about to lock, which would imply a lockorder
 			// inversion.
-			for (locked_idx, locked) in held.borrow().iter() {
+			for (locked_idx, _locked) in held.borrow().iter() {
 				if *locked_idx == this.lock_idx {
 					// Note that with `feature = "backtrace"` set, we may be looking at different
 					// instances of the same lock. Still, doing so is quite risky, a total order
@@ -138,18 +138,20 @@ impl LockMetadata {
 					#[cfg(feature = "backtrace")]
 					debug_assert!(_double_lock_self_allowed,
 						"Tried to acquire a lock while it was held!\nLock constructed at {}",
-						get_construction_location(&this._lock_construction_bt));
+						get_construction_location(&this._lock_construction_bt).0);
 					#[cfg(not(feature = "backtrace"))]
 					panic!("Tried to acquire a lock while it was held!");
 				}
 			}
-			for (locked_idx, locked) in held.borrow().iter() {
+			for (_locked_idx, locked) in held.borrow().iter() {
 				for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() {
 					if *locked_dep_idx == this.lock_idx && *locked_dep_idx != locked.lock_idx {
 						#[cfg(feature = "backtrace")]
 						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock being taken constructed at: {} ({}):\n{:?}\nLock constructed at: {} ({})\n{:?}\n\nLock dep created at:\n{:?}\n\n",
-							get_construction_location(&this._lock_construction_bt), this.lock_idx, this._lock_construction_bt,
-							get_construction_location(&locked._lock_construction_bt), locked.lock_idx, locked._lock_construction_bt,
+							get_construction_location(&this._lock_construction_bt).0,
+							this.lock_idx, this._lock_construction_bt,
+							get_construction_location(&locked._lock_construction_bt).0,
+							locked.lock_idx, locked._lock_construction_bt,
 							_locked_dep._lockdep_trace);
 						#[cfg(not(feature = "backtrace"))]
 						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
@@ -199,6 +201,11 @@ pub struct Mutex<T: Sized> {
 	inner: StdMutex<T>,
 	deps: Arc<LockMetadata>,
 }
+impl<T: Sized> Mutex<T> {
+	pub(crate) fn into_inner(self) -> LockResult<T> {
+		self.inner.into_inner().map_err(|_| ())
+	}
+}
 
 #[must_use = "if unused the Mutex will immediately unlock"]
 pub struct MutexGuard<'a, T: Sized + 'a> {
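
For context on the hunk at @@ -113,11 +107,17 @@ above: lockorder metadata is deduplicated by the mutex's construction site, and because Windows backtraces carry no column numbers the map key is only "file:line", so the new assert_eq! refuses two mutexes built on one source line. Below is a minimal, self-contained sketch of that dedup-plus-assert idea. It is not the rust-lightning code: it substitutes std::panic::Location (via #[track_caller]) for the `backtrace` crate, and names such as LockRegistry and register are hypothetical.

// Minimal sketch only (not rust-lightning code): emulates the construction-site
// dedup from the diff using std::panic::Location instead of the `backtrace` crate.
use std::collections::HashMap;
use std::panic::Location;
use std::sync::{Arc, Mutex as StdMutex};

#[derive(Default)]
struct LockRegistry {
	// Metadata keyed by "file:line" only, mirroring the diff; the column seen at
	// first registration is remembered so later same-line collisions can be caught.
	by_location: StdMutex<HashMap<String, (Option<u32>, Arc<String>)>>,
}

impl LockRegistry {
	#[track_caller]
	fn register(&self, name: &str) -> Arc<String> {
		let loc = Location::caller();
		let key = format!("{}:{}", loc.file(), loc.line());
		let colno = Some(loc.column());
		let mut map = self.by_location.lock().unwrap();
		if let Some((existing_col, meta)) = map.get(&key) {
			// Same spirit as the assert_eq! added in the diff: without column
			// numbers, two locks on one line would be conflated and could
			// produce lockorder false positives.
			assert_eq!(*existing_col, colno,
				"Cannot construct two locks on the same source line");
			return Arc::clone(meta);
		}
		let meta = Arc::new(name.to_owned());
		map.insert(key, (colno, Arc::clone(&meta)));
		meta
	}
}

fn main() {
	let registry = LockRegistry::default();
	let a = registry.register("lock_a"); // registered at this file:line
	let b = registry.register("lock_b"); // different line, so distinct metadata
	assert!(!Arc::ptr_eq(&a, &b));
}

The other addition in the diff, Mutex::into_inner, consumes the debug wrapper and returns the inner value from the underlying std mutex, mapping a poison error to () to fit this module's LockResult alias; it appears intended for crate-internal callers that need to take ownership of the guarded data.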