From: Matt Corallo
Date: Tue, 7 Feb 2023 19:39:24 +0000 (+0000)
Subject: Move `fairrwlock` to the `sync` module
X-Git-Tag: v0.0.114-beta~14^2~3
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=9422370dd2b2ec73115b5950fa5e19db51c8ea73;p=rust-lightning

Move `fairrwlock` to the `sync` module
---

diff --git a/lightning/src/sync/fairrwlock.rs b/lightning/src/sync/fairrwlock.rs
new file mode 100644
index 000000000..5715a8cf6
--- /dev/null
+++ b/lightning/src/sync/fairrwlock.rs
@@ -0,0 +1,50 @@
+use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
+/// Linux with pthreads under the hood, readers trivially and completely starve writers).
+/// Because we often hold read locks while doing message processing in multiple threads which
+/// can use significant CPU time, with write locks being time-sensitive but relatively small in
+/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
+/// especially during initial graph sync.
+///
+/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
+/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
+/// blocking readers (by taking the write lock) if there are writers pending when we go to take
+/// a read lock.
+pub struct FairRwLock<T> {
+	lock: RwLock<T>,
+	waiting_writers: AtomicUsize,
+}
+
+impl<T> FairRwLock<T> {
+	pub fn new(t: T) -> Self {
+		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
+	}
+
+	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
+	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
+	// memory.
+	pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
+		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
+		let res = self.lock.write();
+		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
+		res
+	}
+
+	pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
+		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
+			let _write_queue_lock = self.lock.write();
+		}
+		// Note that we don't consider ensuring that an underlying RwLock allowing writers to
+		// starve readers doesn't exhibit the same behavior here. I'm not aware of any
+		// libstd-backing RwLock which exhibits this behavior, and as documented in the
+		// struct-level documentation, it shouldn't pose a significant issue for our current
+		// codebase.
+		self.lock.read()
+	}
+
+	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+		self.lock.try_write()
+	}
+}
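
As a usage sketch (not part of this patch; the graph-style data below is hypothetical): the wrapper is a drop-in for std's RwLock in the common read/write pattern. The key property is that write() registers itself in waiting_writers before blocking, so any read() issued while a writer waits queues behind it rather than extending the read-side pile-up.

use std::sync::Arc;
use std::thread;

fn usage_sketch() {
	// Hypothetical shared state; assumes the FairRwLock defined above is in scope.
	let graph = Arc::new(FairRwLock::new(Vec::<u64>::new()));

	// Readers doing CPU-heavy message processing while holding the read lock.
	let mut readers = Vec::new();
	for _ in 0..4 {
		let graph = Arc::clone(&graph);
		readers.push(thread::spawn(move || {
			let nodes = graph.read().unwrap();
			let _ = nodes.len(); // ... process messages against `nodes` ...
		}));
	}

	// A small, time-sensitive write. While this call blocks, waiting_writers is
	// non-zero, so new read() calls queue behind it on the write lock.
	graph.write().unwrap().push(42);

	for r in readers { r.join().unwrap(); }
}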
diff --git a/lightning/src/sync/mod.rs b/lightning/src/sync/mod.rs
index f7226a5fa..5b0cc9c18 100644
--- a/lightning/src/sync/mod.rs
+++ b/lightning/src/sync/mod.rs
@@ -7,9 +7,9 @@ pub use debug_sync::*;
 mod test_lockorder_checks;
 
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use ::std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub(crate) mod fairrwlock;
 #[cfg(all(feature = "std", any(feature = "_bench_unstable", not(test))))]
-pub use crate::util::fairrwlock::FairRwLock;
+pub use {std::sync::{Arc, Mutex, Condvar, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}, fairrwlock::FairRwLock};
 
 #[cfg(not(feature = "std"))]
 mod nostd_sync;

diff --git a/lightning/src/util/fairrwlock.rs b/lightning/src/util/fairrwlock.rs
deleted file mode 100644
index 5715a8cf6..000000000
--- a/lightning/src/util/fairrwlock.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
-/// Linux with pthreads under the hood, readers trivially and completely starve writers).
-/// Because we often hold read locks while doing message processing in multiple threads which
-/// can use significant CPU time, with write locks being time-sensitive but relatively small in
-/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
-/// especially during initial graph sync.
-///
-/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
-/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
-/// blocking readers (by taking the write lock) if there are writers pending when we go to take
-/// a read lock.
-pub struct FairRwLock<T> {
-	lock: RwLock<T>,
-	waiting_writers: AtomicUsize,
-}
-
-impl<T> FairRwLock<T> {
-	pub fn new(t: T) -> Self {
-		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
-	}
-
-	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
-	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
-	// memory.
-	pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
-		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
-		let res = self.lock.write();
-		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
-		res
-	}
-
-	pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
-		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
-			let _write_queue_lock = self.lock.write();
-		}
-		// Note that we don't consider ensuring that an underlying RwLock allowing writers to
-		// starve readers doesn't exhibit the same behavior here. I'm not aware of any
-		// libstd-backing RwLock which exhibits this behavior, and as documented in the
-		// struct-level documentation, it shouldn't pose a significant issue for our current
-		// codebase.
-		self.lock.read()
-	}
-
-	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
-		self.lock.try_write()
-	}
-}
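
The sync/mod.rs hunk above keeps the module private (pub(crate)) and re-exports FairRwLock alongside the std lock types, so crate-internal call sites are unaffected by the file move. A hypothetical caller inside the crate:

// Before this patch, a crate-internal caller would have written:
//     use crate::util::fairrwlock::FairRwLock;
// After it, the type comes from the sync module like the other lock types
// (only when built with the `std` feature, per the cfg gates above):
use crate::sync::{FairRwLock, Mutex, RwLock};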
diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs
index 1673bd07f..7bcbc5a41 100644
--- a/lightning/src/util/mod.rs
+++ b/lightning/src/util/mod.rs
@@ -27,8 +27,6 @@ pub mod wakers;
 pub(crate) mod atomic_counter;
 pub(crate) mod byte_utils;
 pub(crate) mod chacha20;
-#[cfg(all(any(feature = "_bench_unstable", not(test)), feature = "std"))]
-pub(crate) mod fairrwlock;
 #[cfg(fuzzing)]
 pub mod zbase32;
 #[cfg(not(fuzzing))]
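
Finally, a sketch of the fairness behaviour the struct-level doc comment describes (a hypothetical test, not part of the patch; the sleep is illustrative only, and a real test would need stronger synchronization):

use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn fairness_sketch() {
	let lock = Arc::new(FairRwLock::new(0u32));
	let long_read = lock.read().unwrap(); // a long-lived reader

	let l = Arc::clone(&lock);
	let writer = thread::spawn(move || {
		// Blocks behind long_read; waiting_writers stays non-zero while it waits.
		*l.write().unwrap() += 1;
	});
	thread::sleep(Duration::from_millis(10)); // let the writer register (illustrative)

	let l = Arc::clone(&lock);
	let late_reader = thread::spawn(move || {
		// Observes waiting_writers != 0, so it queues on the write lock instead of
		// sharing the read lock with long_read and starving the writer. Ordering
		// between it and the writer once the lock frees is still up to std's RwLock.
		let _ = *l.read().unwrap();
	});

	drop(long_read); // unblocks the writer, then the queued reader
	writer.join().unwrap();
	late_reader.join().unwrap();
}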