Move `fairrwlock` to the `sync` module
author    Matt Corallo <git@bluematt.me>
          Tue, 7 Feb 2023 19:39:24 +0000 (19:39 +0000)
committer Matt Corallo <git@bluematt.me>
          Tue, 14 Feb 2023 23:33:02 +0000 (23:33 +0000)
lightning/src/sync/fairrwlock.rs [new file with mode: 0644]
lightning/src/sync/mod.rs
lightning/src/util/fairrwlock.rs [deleted file]
lightning/src/util/mod.rs

diff --git a/lightning/src/sync/fairrwlock.rs b/lightning/src/sync/fairrwlock.rs
new file mode 100644
index 0000000..5715a8c
--- /dev/null
+++ b/lightning/src/sync/fairrwlock.rs
@@ -0,0 +1,50 @@
+use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
+/// Linux with pthreads under the hood, readers trivially and completely starve writers).
+/// Because we often hold read locks while doing message processing in multiple threads which
+/// can use significant CPU time, with write locks being time-sensitive but relatively small in
+/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
+/// especially during initial graph sync.
+///
+/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
+/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
+/// blocking readers (by taking the write lock) if there are writers pending when we go to take
+/// a read lock.
+pub struct FairRwLock<T> {
+       lock: RwLock<T>,
+       waiting_writers: AtomicUsize,
+}
+
+impl<T> FairRwLock<T> {
+       pub fn new(t: T) -> Self {
+               Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
+       }
+
+       // Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
+       // ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
+       // memory.
+       pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
+               self.waiting_writers.fetch_add(1, Ordering::Relaxed);
+               let res = self.lock.write();
+               self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
+               res
+       }
+
+       pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
+               if self.waiting_writers.load(Ordering::Relaxed) != 0 {
+                       let _write_queue_lock = self.lock.write();
+               }
+               // Note that we don't consider ensuring that an underlying RwLock allowing writers to
+               // starve readers doesn't exhibit the same behavior here. I'm not aware of any
+               // libstd-backing RwLock which exhibits this behavior, and as documented in the
+               // struct-level documentation, it shouldn't pose a significant issue for our current
+               // codebase.
+               self.lock.read()
+       }
+
+       pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+               self.lock.try_write()
+       }
+}
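
For context only (not part of this commit), below is a minimal usage sketch of the wrapper added above: several CPU-heavy reader threads plus one time-sensitive writer, the situation the struct-level comment describes. All names are illustrative, and the sketch assumes `FairRwLock` is in scope, e.g. via `use crate::sync::FairRwLock;` from inside the crate.

	// Illustrative sketch only -- not part of this commit.
	use std::sync::Arc;
	use std::thread;

	fn fair_rwlock_example() {
		// Shared state with many CPU-heavy readers and an occasional,
		// time-sensitive writer, the pattern the doc comment above describes.
		let shared = Arc::new(FairRwLock::new(Vec::<u64>::new()));

		let mut readers = Vec::new();
		for _ in 0..4 {
			let shared = Arc::clone(&shared);
			readers.push(thread::spawn(move || {
				for _ in 0..1_000 {
					// A reader that arrives while a writer is queued first takes
					// (and immediately drops) the write lock, so it queues behind
					// the writer instead of starving it.
					let guard = shared.read().unwrap();
					let _ = guard.len(); // ... long message-processing work ...
				}
			}));
		}

		{
			// write() bumps waiting_writers before blocking on the inner RwLock,
			// which is the flag new readers check.
			let mut guard = shared.write().unwrap();
			guard.push(42);
		}

		for r in readers { r.join().unwrap(); }
	}
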
diff --git a/lightning/src/sync/mod.rs b/lightning/src/sync/mod.rs
index f7226a5fa34eef114d27c6569207dbc68354b950..5b0cc9c1886611bd0dc47a8d39188f36363b825a 100644
--- a/lightning/src/sync/mod.rs
+++ b/lightning/src/sync/mod.rs
@@ -7,9 +7,9 @@ pub use debug_sync::*;
 mod test_lockorder_checks;
 
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub(crate) mod fairrwlock;
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use crate::util::fairrwlock::FairRwLock;
+pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
 
 #[cfg(not(feature = "std"))]
 mod nostd_sync;
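
As a rough illustration (again not from this diff), code elsewhere in the crate imports these primitives through `crate::sync` rather than `std::sync`, so the same call site compiles against the std-backed types re-exported here or against the substitutes selected by the other cfg branches. The struct and field names below are hypothetical:

	// Hypothetical call site, for illustration only.
	use crate::sync::{FairRwLock, Mutex};
	use std::collections::HashMap;

	struct PeerState {
		msgs_received: u64,
	}

	struct PeerHolder {
		// Read-locked by every message-processing thread, write-locked only
		// when a peer connects or disconnects, so fairness keeps writers live.
		peers: FairRwLock<HashMap<u64, Mutex<PeerState>>>,
	}

	impl PeerHolder {
		fn new() -> Self {
			Self { peers: FairRwLock::new(HashMap::new()) }
		}
	}
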
diff --git a/lightning/src/util/fairrwlock.rs b/lightning/src/util/fairrwlock.rs
deleted file mode 100644
index 5715a8c..0000000
--- a/lightning/src/util/fairrwlock.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
-/// Linux with pthreads under the hood, readers trivially and completely starve writers).
-/// Because we often hold read locks while doing message processing in multiple threads which
-/// can use significant CPU time, with write locks being time-sensitive but relatively small in
-/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
-/// especially during initial graph sync.
-///
-/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
-/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
-/// blocking readers (by taking the write lock) if there are writers pending when we go to take
-/// a read lock.
-pub struct FairRwLock<T> {
-       lock: RwLock<T>,
-       waiting_writers: AtomicUsize,
-}
-
-impl<T> FairRwLock<T> {
-       pub fn new(t: T) -> Self {
-               Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
-       }
-
-       // Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
-       // ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
-       // memory.
-       pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
-               self.waiting_writers.fetch_add(1, Ordering::Relaxed);
-               let res = self.lock.write();
-               self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
-               res
-       }
-
-       pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
-               if self.waiting_writers.load(Ordering::Relaxed) != 0 {
-                       let _write_queue_lock = self.lock.write();
-               }
-               // Note that we don't consider ensuring that an underlying RwLock allowing writers to
-               // starve readers doesn't exhibit the same behavior here. I'm not aware of any
-               // libstd-backing RwLock which exhibits this behavior, and as documented in the
-               // struct-level documentation, it shouldn't pose a significant issue for our current
-               // codebase.
-               self.lock.read()
-       }
-
-       pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
-               self.lock.try_write()
-       }
-}
diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs
index 1673bd07f69b24d7af49f7d9c3604ea33f000422..7bcbc5a41fbf72c346f6e2ed81ddebfad43ce9f1 100644
--- a/lightning/src/util/mod.rs
+++ b/lightning/src/util/mod.rs
@@ -27,8 +27,6 @@ pub mod wakers;
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
-#[cfg(all(any(feature = "_bench_unstable", not(test)), feature = "std"))]
-pub(crate) mod fairrwlock;
 #[cfg(fuzzing)]
 pub mod zbase32;
 #[cfg(not(fuzzing))]