rust-lightning: lightning/src/sync/fairrwlock.rs
use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
use std::sync::atomic::{AtomicUsize, Ordering};

/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
/// Linux with pthreads under the hood, readers trivially and completely starve writers).
/// Because we often hold read locks while doing message processing in multiple threads which
/// can use significant CPU time, with write locks being time-sensitive but relatively small in
/// CPU time, we can end up with starvation completely blocking incoming connections or pings,
/// especially during initial graph sync.
///
/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
/// blocking readers (by taking the write lock) if there are writers pending when we go to take
/// a read lock.
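///
/// # Example
///
/// A minimal usage sketch (illustrative only; marked `ignore` since this module may not be
/// reachable as a public doctest path):
///
/// ```ignore
/// let lock = FairRwLock::new(0u64);
/// {
///     // Multiple readers may hold the lock concurrently...
///     let value = lock.read().unwrap();
///     assert_eq!(*value, 0);
/// }
/// // ...but once a writer is waiting, new readers queue up behind it.
/// *lock.write().unwrap() = 42;
/// assert_eq!(*lock.read().unwrap(), 42);
/// ```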
pub struct FairRwLock<T> {
	lock: RwLock<T>,
	waiting_writers: AtomicUsize,
}

impl<T> FairRwLock<T> {
	pub fn new(t: T) -> Self {
		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
	}

	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
	// memory.
	pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
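		// Advertise a pending writer so that concurrent read() calls divert to the write
		// lock, then clear the marker once the write lock has been acquired.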
		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
		let res = self.lock.write();
		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
		res
	}

	pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
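			// A writer is pending: briefly take (and, at the end of this block, release)
			// the write lock ourselves. This queues us behind any pending writers, giving
			// them a chance to make progress before we take our read lock.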
			let _write_queue_lock = self.lock.write();
		}
		// Note that we make no attempt to avoid the reverse failure mode: an underlying RwLock
		// which allows writers to starve readers. I'm not aware of any RwLock backing libstd
		// which behaves that way, and as documented in the struct-level documentation, it
		// shouldn't pose a significant issue for our current codebase.
		self.lock.read()
	}

	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
		self.lock.try_write()
	}
}
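
// A minimal smoke test sketching basic usage. The fairness property itself is
// timing-dependent and is not asserted here.
#[cfg(test)]
mod tests {
	use super::FairRwLock;

	#[test]
	fn read_write_roundtrip() {
		let lock = FairRwLock::new(0u64);
		{
			let mut guard = lock.write().unwrap();
			*guard = 42;
		}
		assert_eq!(*lock.read().unwrap(), 42);
		// try_write should succeed when no other guard is held.
		*lock.try_write().unwrap() += 1;
		assert_eq!(*lock.read().unwrap(), 43);
	}
}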