Merge pull request #2006 from TheBlueMatt/2023-02-no-recursive-read-locks

rust-lightning/lightning/src/sync/fairrwlock.rs
use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
use std::sync::atomic::{AtomicUsize, Ordering};
use super::{LockHeldState, LockTestExt};

/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
/// Linux with pthreads under the hood, readers trivially and completely starve writers).
/// Because we often hold read locks while doing message processing in multiple threads, which
/// can use significant CPU time, while write locks are time-sensitive but require relatively
/// little CPU time, we can end up with starvation completely blocking incoming connections or
/// pings, especially during initial graph sync.
///
/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
/// blocking readers (by taking the write lock) if there are writers pending when we go to take
/// a read lock.
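///
/// Usage sketch (illustrative only, not part of the upstream documentation; the shared
/// counter below is a stand-in for real shared state):
///
/// ```ignore
/// let lock = std::sync::Arc::new(FairRwLock::new(0u64));
///
/// // Many reader threads may hold read locks for long stretches of processing...
/// let value = *lock.read().unwrap();
///
/// // ...but a writer registers itself before blocking, so readers arriving after it
/// // briefly take (and immediately release) the write lock, queueing behind the writer
/// // rather than starving it.
/// *lock.write().unwrap() += 1;
/// ```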
pub struct FairRwLock<T> {
	lock: RwLock<T>,
	waiting_writers: AtomicUsize,
}

impl<T> FairRwLock<T> {
	pub fn new(t: T) -> Self {
		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
	}

	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
	// memory.
	pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
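		// Register this writer before blocking on the lock, so that concurrent readers
		// observe a non-zero waiting_writers count and defer to us (see read() below).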
		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
		let res = self.lock.write();
		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
		res
	}

	pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
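			// Taking the write lock (and immediately dropping the guard at the end of
			// this scope) forces us to queue behind any pending writers in the
			// underlying RwLock's wait list, giving them a chance to make progress
			// before we take our read lock below.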
			let _write_queue_lock = self.lock.write();
		}
		// Note that we don't attempt to handle the inverse problem here - an underlying
		// RwLock which allows writers to starve readers would exhibit the same starvation
		// through this wrapper. I'm not aware of any libstd-backing RwLock which exhibits
		// that behavior, and, as documented at the struct level, it shouldn't pose a
		// significant issue for our current codebase.
		self.lock.read()
	}

	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
		self.lock.try_write()
	}
}

impl<'a, T: 'a> LockTestExt<'a> for FairRwLock<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		// fairrwlock is only built in non-test modes, so we should never support tests.
		LockHeldState::Unsupported
	}
	type ExclLock = RwLockWriteGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
		self.write().unwrap()
	}
}
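
// Illustrative sanity check (not part of the upstream file; the module and test names
// below are hypothetical). Note that in the real crate this file is only compiled in
// non-test sync modes, so this module exists purely to sketch the intended usage.
#[cfg(test)]
mod usage_sketch {
	use super::FairRwLock;
	use std::sync::Arc;
	use std::thread;

	#[test]
	fn writers_are_not_starved() {
		let lock = Arc::new(FairRwLock::new(0u32));

		// Spawn a handful of writer threads; each registers in waiting_writers before
		// blocking, so readers running concurrently will yield to them.
		let writers: Vec<_> = (0..4).map(|_| {
			let lock = Arc::clone(&lock);
			thread::spawn(move || { *lock.write().unwrap() += 1; })
		}).collect();

		// Concurrent readers take the fairness path whenever a writer is pending.
		for _ in 0..4 {
			let _val = *lock.read().unwrap();
		}

		for w in writers { w.join().unwrap(); }
		assert_eq!(*lock.read().unwrap(), 4);
	}
}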