Merge pull request #2033 from tnull/2023-02-make-esplora-sync-test-more-robust
diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs
index b61d1cb55e8cff3143c686c4ad6fbc13427b47d9..5631093723733f16f4d7447c0865f58219d1cf40 100644
--- a/lightning/src/sync/debug_sync.rs
+++ b/lightning/src/sync/debug_sync.rs
@@ -14,6 +14,8 @@ use std::sync::Condvar as StdCondvar;
 
 use crate::prelude::HashMap;
 
+use super::{LockTestExt, LockHeldState};
+
 #[cfg(feature = "backtrace")]
 use {crate::prelude::hash_map, backtrace::Backtrace, std::sync::Once};
 
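For context, `LockTestExt` and `LockHeldState` are imported from the parent `sync` module. A minimal sketch consistent with the impl blocks added later in this diff (the actual definitions in lightning/src/sync/mod.rs may carry additional variants and derives):

	// Sketch only: inferred from the impls in this diff, not copied from mod.rs.
	pub(crate) enum LockHeldState {
		HeldByThread,
		NotHeldByThread,
	}

	pub(crate) trait LockTestExt {
		fn held_by_thread(&self) -> LockHeldState;
	}
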
@@ -77,7 +79,7 @@ fn get_construction_location(backtrace: &Backtrace) -> String {
        // Find the first frame that is after `debug_sync` (or that is in our tests) and use
        // that as the mutex construction site. Note that the first few frames may be in
        // the `backtrace` crate, so we have to ignore those.
-       let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
+       let sync_mutex_constr_regex = regex::Regex::new(r"lightning.*debug_sync").unwrap();
        let mut found_debug_sync = false;
        for frame in backtrace.frames() {
                for symbol in frame.symbols() {
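The construction-site pattern is relaxed here so that any frame inside `debug_sync` starts the scan, not only constructor frames whose symbol contains `new` (presumably needed now that non-constructor helpers such as `held_by_thread` live in this module). An illustrative check with hypothetical symbol strings, using the `regex` crate:

	fn regex_relaxation_demo() {
		let old = regex::Regex::new(r"lightning.*debug_sync.*new").unwrap();
		let relaxed = regex::Regex::new(r"lightning.*debug_sync").unwrap();
		let sym = "lightning::sync::debug_sync::LockMetadata::held_by_thread";
		assert!(!old.is_match(sym)); // the old pattern required a `new` frame
		assert!(relaxed.is_match(sym)); // the relaxed pattern matches any debug_sync frame
	}
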
@@ -168,6 +170,18 @@ impl LockMetadata {
        fn pre_lock(this: &Arc<LockMetadata>) { Self::_pre_lock(this, false); }
        fn pre_read_lock(this: &Arc<LockMetadata>) -> bool { Self::_pre_lock(this, true) }
 
+       fn held_by_thread(this: &Arc<LockMetadata>) -> LockHeldState {
+               let mut res = LockHeldState::NotHeldByThread;
+               LOCKS_HELD.with(|held| {
+                       for (locked_idx, _locked) in held.borrow().iter() {
+                               if *locked_idx == this.lock_idx {
+                                       res = LockHeldState::HeldByThread;
+                               }
+                       }
+               });
+               res
+       }
+
        fn try_locked(this: &Arc<LockMetadata>) {
                LOCKS_HELD.with(|held| {
                        // Since a try-lock will simply fail if the lock is held already, we do not
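The new `held_by_thread` scans the thread-local `LOCKS_HELD` map that `try_locked` and the other lock paths maintain. A self-contained sketch of that pattern, assuming the map is keyed by lock index (the real map stores lock metadata as its values):

	use std::cell::RefCell;
	use std::collections::HashMap;

	thread_local! {
		// Per-thread set of currently-held locks, keyed by lock index.
		static LOCKS_HELD: RefCell<HashMap<u64, ()>> = RefCell::new(HashMap::new());
	}

	// Equivalent to the scan above: the thread holds the lock iff its index is present.
	fn held_by_this_thread(lock_idx: u64) -> bool {
		LOCKS_HELD.with(|held| held.borrow().keys().any(|idx| *idx == lock_idx))
	}
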
@@ -248,6 +262,13 @@ impl<T> Mutex<T> {
        }
 }
 
+impl <T> LockTestExt for Mutex<T> {
+       #[inline]
+       fn held_by_thread(&self) -> LockHeldState {
+               LockMetadata::held_by_thread(&self.deps)
+       }
+}
+
 pub struct RwLock<T: Sized> {
        inner: StdRwLock<T>,
        deps: Arc<LockMetadata>,
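With this impl in place, test code can assert whether the current thread holds a given lock. A hypothetical usage sketch, assuming `debug_sync`'s `Mutex` and `LockHeldState` are in scope (`matches!` avoids assuming `LockHeldState` derives `PartialEq`):

	#[test]
	fn mutex_hold_state() {
		let mutex = Mutex::new(0u8);
		assert!(matches!(mutex.held_by_thread(), LockHeldState::NotHeldByThread));
		let _guard = mutex.lock().unwrap();
		assert!(matches!(mutex.held_by_thread(), LockHeldState::HeldByThread));
	}
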
@@ -332,101 +353,11 @@ impl<T> RwLock<T> {
        }
 }
 
-pub type FairRwLock<T> = RwLock<T>;
-
-mod tests {
-       use super::{RwLock, Mutex};
-
-       #[test]
-       #[should_panic]
-       #[cfg(not(feature = "backtrace"))]
-       fn recursive_lock_fail() {
-               let mutex = Mutex::new(());
-               let _a = mutex.lock().unwrap();
-               let _b = mutex.lock().unwrap();
-       }
-
-       #[test]
-       fn recursive_read() {
-               let lock = RwLock::new(());
-               let _a = lock.read().unwrap();
-               let _b = lock.read().unwrap();
-       }
-
-       #[test]
-       #[should_panic]
-       fn lockorder_fail() {
-               let a = Mutex::new(());
-               let b = Mutex::new(());
-               {
-                       let _a = a.lock().unwrap();
-                       let _b = b.lock().unwrap();
-               }
-               {
-                       let _b = b.lock().unwrap();
-                       let _a = a.lock().unwrap();
-               }
-       }
-
-       #[test]
-       #[should_panic]
-       fn write_lockorder_fail() {
-               let a = RwLock::new(());
-               let b = RwLock::new(());
-               {
-                       let _a = a.write().unwrap();
-                       let _b = b.write().unwrap();
-               }
-               {
-                       let _b = b.write().unwrap();
-                       let _a = a.write().unwrap();
-               }
-       }
-
-       #[test]
-       #[should_panic]
-       fn read_lockorder_fail() {
-               let a = RwLock::new(());
-               let b = RwLock::new(());
-               {
-                       let _a = a.read().unwrap();
-                       let _b = b.read().unwrap();
-               }
-               {
-                       let _b = b.read().unwrap();
-                       let _a = a.read().unwrap();
-               }
-       }
-
-       #[test]
-       fn read_recursive_no_lockorder() {
-               // Like the above, but note that no lockorder is implied when we recursively read-lock a
-               // RwLock, causing this to pass just fine.
-               let a = RwLock::new(());
-               let b = RwLock::new(());
-               let _outer = a.read().unwrap();
-               {
-                       let _a = a.read().unwrap();
-                       let _b = b.read().unwrap();
-               }
-               {
-                       let _b = b.read().unwrap();
-                       let _a = a.read().unwrap();
-               }
-       }
-
-       #[test]
-       #[should_panic]
-       fn read_write_lockorder_fail() {
-               let a = RwLock::new(());
-               let b = RwLock::new(());
-               {
-                       let _a = a.write().unwrap();
-                       let _b = b.read().unwrap();
-               }
-               {
-                       let _b = b.read().unwrap();
-                       let _a = a.write().unwrap();
-               }
+impl <T> LockTestExt for RwLock<T> {
+       #[inline]
+       fn held_by_thread(&self) -> LockHeldState {
+               LockMetadata::held_by_thread(&self.deps)
        }
 }
+
+pub type FairRwLock<T> = RwLock<T>;
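
Note that `FairRwLock` is only moved, not changed: because this debug build aliases it to the lockorder-checked `RwLock`, it inherits the `LockTestExt` impl above with no extra code.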