X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Futil%2Ffairrwlock.rs;fp=lightning%2Fsrc%2Futil%2Ffairrwlock.rs;h=8dd74f2b53dc1697b5c8d4ad2e22eb231c84ba4e;hb=ae4ceb71a584f0aa9e0c1a14a6219d87f1668eba;hp=0000000000000000000000000000000000000000;hpb=f90983180eac9950256d47cb5eb77353f19fec6b;p=rust-lightning

diff --git a/lightning/src/util/fairrwlock.rs b/lightning/src/util/fairrwlock.rs
new file mode 100644
index 00000000..8dd74f2b
--- /dev/null
+++ b/lightning/src/util/fairrwlock.rs
@@ -0,0 +1,50 @@
+use std::sync::{TryLockResult, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
+/// Linux with pthreads under the hood, readers trivially and completely starve writers).
+/// Because we often hold read locks while doing message processing in multiple threads which
+/// can use significant CPU time, with write locks being time-sensitive but relatively small in
+/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
+/// especially during initial graph sync.
+///
+/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
+/// wrapper here. Its not particularly optimized, but provides some reasonable fairness by
+/// blocking readers (by taking the write lock) if there are writers pending when we go to take
+/// a read lock.
+pub struct FairRwLock<T> {
+	lock: RwLock<T>,
+	waiting_writers: AtomicUsize,
+}
+
+impl<T> FairRwLock<T> {
+	pub fn new(t: T) -> Self {
+		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
+	}
+
+	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
+	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
+	// memory.
+	pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
+		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
+		let res = self.lock.write();
+		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
+		res
+	}
+
+	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
+		self.lock.try_write()
+	}
+
+	pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
+		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
+			let _write_queue_lock = self.lock.write();
+		}
+		// Note that we don't consider ensuring that an underlying RwLock allowing writers to
+		// starve readers doesn't exhibit the same behavior here. I'm not aware of any
+		// libstd-backing RwLock which exhibits this behavior, and as documented in the
+		// struct-level documentation, it shouldn't pose a significant issue for our current
+		// codebase.
+		self.lock.read()
+	}
+}
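
A minimal usage sketch (not part of the commit above; the FairRwLock type from the new file is assumed to be in scope, and the thread counts and Vec<u64> payload are made up for illustration). The readers stand in for the long-running message-processing threads described in the doc comment, while the writer stands in for a time-sensitive update that must not be starved:

use std::sync::Arc;
use std::thread;

fn main() {
	// `FairRwLock` from the new file above is assumed to be in scope here.
	// Shared state guarded by the fair wrapper (hypothetical payload type).
	let state: Arc<FairRwLock<Vec<u64>>> = Arc::new(FairRwLock::new(Vec::new()));

	// Long-running readers, analogous to message-processing threads that hold
	// read locks for significant CPU time.
	let readers: Vec<_> = (0..4).map(|_| {
		let state = Arc::clone(&state);
		thread::spawn(move || {
			for _ in 0..10_000 {
				let guard = state.read().unwrap();
				let _sum: u64 = guard.iter().sum();
			}
		})
	}).collect();

	// A time-sensitive writer. Because read() briefly takes (and drops) the write
	// lock whenever waiting_writers is non-zero, new readers queue behind this
	// writer instead of starving it.
	let writer = {
		let state = Arc::clone(&state);
		thread::spawn(move || {
			for i in 0..100u64 {
				state.write().unwrap().push(i);
			}
		})
	};

	for r in readers { r.join().unwrap(); }
	writer.join().unwrap();
}

The key design choice is in read(): rather than maintaining an explicit queue, a reader that observes a pending writer acquires and immediately drops the write lock, which makes it wait until the pending writer has had its turn. The Relaxed atomics are sufficient because, as the in-code comment notes, the underlying RwLock already provides all the memory ordering the protected data needs.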