use std::sync::{LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, TryLockResult};
use std::sync::atomic::{AtomicUsize, Ordering};
use super::{LockHeldState, LockTestExt};

/// Rust libstd's RwLock does not provide any fairness guarantees (and, in fact, when used on
/// Linux with pthreads under the hood, readers trivially and completely starve writers).
/// Because we often hold read locks while doing message processing in multiple threads which
/// can use significant CPU time, while write locks are time-sensitive but take relatively
/// little CPU time, we can end up with starvation completely blocking incoming connections or
/// pings, especially during initial graph sync.
///
/// Thus, we need to block readers when a writer is pending, which we do with a trivial RwLock
/// wrapper here. It's not particularly optimized, but provides some reasonable fairness by
/// blocking readers (by taking the write lock) if there are writers pending when we go to take
/// a read lock.
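///
/// # Example
///
/// A minimal usage sketch (`ignore`d as a doctest since this module may not be publicly
/// exported); the API deliberately mirrors that of [`std::sync::RwLock`]:
///
/// ```ignore
/// let lock = FairRwLock::new(0u64);
/// {
/// 	// Multiple readers may hold the lock concurrently...
/// 	let value = lock.read().unwrap();
/// 	assert_eq!(*value, 0);
/// }
/// // ...while a writer gets exclusive access, and any reader arriving while a writer
/// // is waiting queues behind it instead of starving it.
/// *lock.write().unwrap() += 1;
/// ```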
pub struct FairRwLock<T> {
	lock: RwLock<T>,
	waiting_writers: AtomicUsize,
}

impl<T> FairRwLock<T> {
	pub fn new(t: T) -> Self {
		Self { lock: RwLock::new(t), waiting_writers: AtomicUsize::new(0) }
	}

	// Note that all atomic accesses are relaxed, as we do not rely on the atomics here for any
	// ordering at all, instead relying on the underlying RwLock to provide ordering of unrelated
	// memory.
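	/// Acquires the write lock. While we wait, the pending-writer count is non-zero, which
	/// causes concurrent `read` calls to queue behind us rather than starving us.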
	pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
		self.waiting_writers.fetch_add(1, Ordering::Relaxed);
		let res = self.lock.write();
		self.waiting_writers.fetch_sub(1, Ordering::Relaxed);
		res
	}

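	/// Acquires the read lock. If any writer is currently waiting, we first queue behind it
	/// by taking (and immediately releasing) the write lock, providing the fairness described
	/// in the struct-level documentation.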
	pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
		if self.waiting_writers.load(Ordering::Relaxed) != 0 {
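			// Taking the write lock here (the guard is dropped at the end of this block)
			// simply parks us in the writer queue until the pending writers have finished,
			// rather than letting this reader jump ahead of them.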
			let _write_queue_lock = self.lock.write();
		}
		// Note that we make no attempt to handle the reverse problem - an underlying RwLock
		// which lets writers starve readers. I'm not aware of any RwLock implementation
		// backing libstd which behaves that way, and as documented in the struct-level
		// documentation, it shouldn't pose a significant issue for our current codebase.
		self.lock.read()
	}

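	/// Attempts to acquire the write lock without blocking. As this never waits, it does not
	/// register itself in the pending-writer count.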
	#[allow(dead_code)]
	pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
		self.lock.try_write()
	}
}

impl<'a, T: 'a> LockTestExt<'a> for FairRwLock<T> {
	#[inline]
	fn held_by_thread(&self) -> LockHeldState {
		// fairrwlock is only built in non-test modes, so we never need to support
		// lock tracking for tests here.
		LockHeldState::Unsupported
	}
	type ExclLock = RwLockWriteGuard<'a, T>;
	#[inline]
	fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> {
		self.write().unwrap()
	}
}