f953ba1c753750133b6e79a3cc43eb1e05c81962
[rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
59 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
60 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
61 /// responsibilities are:
62 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
63 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
64 ///   writing it to disk/backups by invoking the callback given to it at startup.
65 ///   [`ChannelManager`] persistence should be done in the background.
66 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
67 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
68 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
69 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
70 ///
71 /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
72 /// upon as doing so may result in high latency.
73 ///
74 /// # Note
75 ///
76 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
77 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
78 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
79 /// unilateral chain closure fees are at risk.
80 ///
81 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
82 /// [`Event`]: lightning::events::Event
83 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
84 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Shared flag used to request that the background thread's processing loop exit.
	stop_thread: Arc<AtomicBool>,
	// Handle for the spawned background thread, carrying any I/O error the loop hit.
	// Wrapped in an `Option` so it can be taken out when joining — TODO(review): confirm
	// against the `stop`/`join` implementations further down the file.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
91
// How often (in seconds) we call `ChannelManager::timer_tick_occurred`. Shortened under
// `cfg(test)` so tests don't have to wait a full minute per tick.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

// How often (in seconds) we call `PeerManager::timer_tick_occurred`.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

// How often (in seconds) we persist the scorer to disk.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

// Delay (in seconds) before the first network-graph prune after startup; subsequent prunes
// happen every `NETWORK_PRUNE_TIMER`.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

// How often (in seconds) we call `ChainMonitor::rebroadcast_pending_claims`.
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;

#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
// The minimum of all the periodic timers above (excluding the hourly prune timer).
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
131
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
// Generic parameters: `P` and `R` are (smart) pointers to the two concrete sync
// implementations, `G` points to the `NetworkGraph` they operate on, `U` is the
// `UtxoLookup` used by the P2P sync, and `L` is the logger.
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
180 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
181 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
182         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
183 where
184         U::Target: UtxoLookup,
185         L::Target: Logger,
186 {
187         /// Initializes a new [`GossipSync::P2P`] variant.
188         pub fn p2p(gossip_sync: P) -> Self {
189                 GossipSync::P2P(gossip_sync)
190         }
191 }
192
193 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
194 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
195         GossipSync<
196                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
197                 R,
198                 G,
199                 &'a (dyn UtxoLookup + Send + Sync),
200                 L,
201         >
202 where
203         L::Target: Logger,
204 {
205         /// Initializes a new [`GossipSync::Rapid`] variant.
206         pub fn rapid(gossip_sync: R) -> Self {
207                 GossipSync::Rapid(gossip_sync)
208         }
209 }
210
211 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
212 impl<'a, L: Deref>
213         GossipSync<
214                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
215                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
216                 &'a NetworkGraph<L>,
217                 &'a (dyn UtxoLookup + Send + Sync),
218                 L,
219         >
220 where
221         L::Target: Logger,
222 {
223         /// Initializes a new [`GossipSync::None`] variant.
224         pub fn none() -> Self {
225                 GossipSync::None
226         }
227 }
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
239 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
240 /// to persist.
241 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
242         scorer: &'a S, event: &Event
243 ) -> bool {
244         match event {
245                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
246                         let mut score = scorer.write_lock();
247                         score.payment_path_failed(path, *scid);
248                 },
249                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
250                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
251                         // because the payment made it all the way to the destination with sufficient liquidity.
252                         let mut score = scorer.write_lock();
253                         score.probe_successful(path);
254                 },
255                 Event::PaymentPathSuccessful { path, .. } => {
256                         let mut score = scorer.write_lock();
257                         score.payment_path_successful(path);
258                 },
259                 Event::ProbeSuccessful { path, .. } => {
260                         let mut score = scorer.write_lock();
261                         score.probe_successful(path);
262                 },
263                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
264                         let mut score = scorer.write_lock();
265                         score.probe_failed(path, *scid);
266                 },
267                 _ => return false,
268         }
269         true
270 }
271
// Expands to the body of the background-processing loop. Shared between the `std`
// thread-based `BackgroundProcessor` and the `futures`-based `process_events_async`
// implementations, which supply their own waiting primitives ($await, $get_timer,
// $timer_elapsed) and exit condition ($loop_exit_check). The expansion evaluates to a
// `Result<(), _>` and uses `?` on persistence calls, so it must appear in a function
// returning a compatible `Result`.
macro_rules! define_run_body {
	(
		$persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
		$channel_manager: ident, $process_channel_manager_events: expr,
		$gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
		$loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
		$check_slow_await: expr
	) => { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		// Track when each periodic task last ran so we know when each is next due.
		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				// Reset the prune timer regardless of whether the graph was actually prunable
				// yet, re-checking at the (shorter) first-prune interval until it has been.
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
428
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	/// A future which resolves as soon as the first of its three sub-futures does, reporting
	/// which one completed (and, for `c`, that future's boolean output).
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	/// Which of a [`Selector`]'s sub-futures completed first.
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			// Poll in fixed a -> b -> c order and return the first one that is ready; if
			// several are ready at once, only the earliest in that order is reported.
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	/// Builds a [`Waker`] whose wake/drop operations are all no-ops, for polling a future
	/// outside of a real async executor.
	// SAFETY: the vtable's clone entry returns another null-data RawWaker using the same
	// vtable, and the wake/wake_by_ref/drop entries are no-ops which never dereference the
	// (null) data pointer, so the RawWaker contract is trivially upheld.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
477 #[cfg(feature = "futures")]
478 use futures_util::{Selector, SelectorOutput, dummy_waker};
479 #[cfg(feature = "futures")]
480 use core::task;
481
482 /// Processes background events in a future.
483 ///
484 /// `sleeper` should return a future which completes in the given amount of time and returns a
485 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
486 /// future which outputs `true`, the loop will exit and this function's future will complete.
487 /// The `sleeper` future is free to return early after it has triggered the exit condition.
488 ///
489 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
490 ///
491 /// Requires the `futures` feature. Note that while this method is available without the `std`
492 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
493 /// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
494 /// manually instead.
495 ///
496 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
497 /// mobile device, where we may need to check for interruption of the application regularly. If you
498 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
499 /// are hundreds or thousands of simultaneous process calls running.
500 ///
501 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
502 /// could setup `process_events_async` like this:
503 /// ```
504 /// # use lightning::io;
505 /// # use std::sync::{Arc, RwLock};
506 /// # use std::sync::atomic::{AtomicBool, Ordering};
507 /// # use lightning_background_processor::{process_events_async, GossipSync};
508 /// # struct MyStore {}
509 /// # impl lightning::util::persist::KVStore for MyStore {
510 /// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
511 /// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
512 /// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
513 /// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
514 /// # }
515 /// # struct MyEventHandler {}
516 /// # impl MyEventHandler {
517 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
518 /// # }
519 /// # #[derive(Eq, PartialEq, Clone, Hash)]
520 /// # struct MySocketDescriptor {}
521 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
522 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
523 /// #     fn disconnect_socket(&mut self) {}
524 /// # }
525 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
526 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
527 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
528 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
529 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
530 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
531 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
532 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
533 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
534 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
535 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
536 /// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
537 ///
538 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
539 ///     let background_persister = Arc::clone(&my_persister);
540 ///     let background_event_handler = Arc::clone(&my_event_handler);
541 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
542 ///     let background_chan_man = Arc::clone(&my_channel_manager);
543 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
544 ///     let background_peer_man = Arc::clone(&my_peer_manager);
545 ///     let background_logger = Arc::clone(&my_logger);
546 ///     let background_scorer = Arc::clone(&my_scorer);
547 ///
548 ///     // Setup the sleeper.
549 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
550 ///
551 ///     let sleeper = move |d| {
552 ///             let mut receiver = stop_receiver.clone();
553 ///             Box::pin(async move {
554 ///                     tokio::select!{
555 ///                             _ = tokio::time::sleep(d) => false,
556 ///                             _ = receiver.changed() => true,
557 ///                     }
558 ///             })
559 ///     };
560 ///
561 ///     let mobile_interruptable_platform = false;
562 ///
563 ///     let handle = tokio::spawn(async move {
564 ///             process_events_async(
565 ///                     background_persister,
566 ///                     |e| background_event_handler.handle_event(e),
567 ///                     background_chain_mon,
568 ///                     background_chan_man,
569 ///                     background_gossip_sync,
570 ///                     background_peer_man,
571 ///                     background_logger,
572 ///                     Some(background_scorer),
573 ///                     sleeper,
574 ///                     mobile_interruptable_platform,
575 ///                     )
576 ///                     .await
577 ///                     .expect("Failed to process events");
578 ///     });
579 ///
580 ///     // Stop the background processing.
581 ///     stop_sender.send(()).unwrap();
582 ///     handle.await.unwrap();
583 ///     # }
584 ///```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	PM: 'static + Deref + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	PM::Target: APeerManager + Send + Sync,
{
	// Set to `true` when the user-provided sleeper future resolves with `true`,
	// telling the run body below to exit its loop.
	let mut should_break = false;
	// Decorate the user's event handler: before forwarding an event, apply any
	// network-graph update derived from it and, if a scorer was supplied, feed the
	// event to the scorer and persist the scorer when it reports a change.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					// Persistence failure here is logged but does not abort event
					// handling — the scorer can be re-persisted on a later update.
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	define_run_body!(
		persister, chain_monitor,
		chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			// Race (a) the channel manager needing attention, (b) the chain monitor
			// needing attention, and (c) the caller's sleeper. Only the sleeper can
			// request an exit; the other two just wake the loop early.
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A|SelectorOutput::B => {},
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		// Poll a sleep future exactly once (with a dummy waker) to check whether a
		// timer has elapsed without blocking; a `Ready(true)` also requests exit.
		|fut: &mut SleepFuture, _| {
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform
	)
}
681
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		PM: 'static + Deref + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
		PM::Target: APeerManager + Send + Sync,
	{
		// Shared flag the public `stop` path sets to ask the spawned thread to exit.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user's event handler: apply any network-graph update
			// derived from the event and, if a scorer was supplied, feed the event
			// to the scorer (persisting it when it changed) before forwarding.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						// Log-and-continue: a failed scorer write should not abort
						// event processing.
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			define_run_body!(
				persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				// Block until the channel manager or chain monitor needs attention,
				// waking at least every 100ms so the stop flag is re-checked promptly.
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false
			)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		// Note: does not set the stop flag; this waits for the thread to exit on
		// its own (e.g. due to a persistence error).
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	// Signal the background thread to exit, then wait for it to finish.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	// Join the thread if it has not been joined already; a second call (e.g. from
	// `Drop` after `stop`) is a no-op returning `Ok(())`.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
842
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Ensure the background thread never outlives the processor. NOTE: this
		// unwraps the thread's result, so dropping without first calling `stop` or
		// `join` will panic if the thread exited with a persistence error.
		self.stop_and_join_thread().unwrap();
	}
}
849
850 #[cfg(all(feature = "std", test))]
851 mod tests {
852         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
853         use bitcoin::blockdata::locktime::absolute::LockTime;
854         use bitcoin::blockdata::transaction::{Transaction, TxOut};
855         use bitcoin::network::constants::Network;
856         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
857         use lightning::chain::{BestBlock, Confirm, chainmonitor};
858         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
859         use lightning::sign::{InMemorySigner, KeysManager};
860         use lightning::chain::transaction::OutPoint;
861         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
862         use lightning::{get_event_msg, get_event};
863         use lightning::ln::PaymentHash;
864         use lightning::ln::channelmanager;
865         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
866         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
867         use lightning::ln::functional_test_utils::*;
868         use lightning::ln::msgs::{ChannelMessageHandler, Init};
869         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
870         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
871         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
872         use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
873         use lightning::util::config::UserConfig;
874         use lightning::util::ser::Writeable;
875         use lightning::util::test_utils;
876         use lightning::util::persist::{KVStore,
877                 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
878                 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
879                 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
880         use lightning_persister::fs_store::FilesystemStore;
881         use std::collections::VecDeque;
882         use std::{fs, env};
883         use std::path::PathBuf;
884         use std::sync::{Arc, Mutex};
885         use std::sync::mpsc::SyncSender;
886         use std::time::Duration;
887         use lightning_rapid_gossip_sync::RapidGossipSync;
888         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
889
	/// How long tests wait for an expected event, as a multiple of the processor's
	/// freshness timer (presumably seconds — confirm against `FRESHNESS_TIMER`'s unit).
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;

	/// A no-op socket descriptor, used only to satisfy `PeerManager`'s descriptor
	/// type parameter in tests — no real I/O ever happens through it.
	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			// Report that zero bytes were written.
			0
		}

		fn disconnect_socket(&mut self) {}
	}
901
	// When building for C bindings, scores must be wrapped in the bindings-aware
	// lockable wrapper; a plain `Mutex` suffices otherwise.
	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;

	// Concrete `ChannelManager` instantiation used throughout these tests, built
	// entirely from test utilities (test broadcaster/fee estimator/logger) with a
	// `DefaultRouter` over a shared `NetworkGraph` and the `TestScorer` below.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;

	// Matching `ChainMonitor` instantiation, persisting monitors to a `FilesystemStore`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;

	// Shorthand for the two gossip-sync flavors the tests exercise.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
928
	/// Everything a single test node needs: the channel manager plus all of the
	/// collaborators a `BackgroundProcessor` is started with.
	struct Node {
		node: Arc<ChannelManager>,               // the node's channel manager
		p2p_gossip_sync: PGS,                    // P2P-flavored gossip sync over `network_graph`
		rapid_gossip_sync: RGS,                  // RGS-flavored gossip sync over `network_graph`
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		kv_store: Arc<FilesystemStore>,          // on-disk store; its directory is removed on drop
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>, // scorer whose updates the tests assert on
	}
942
943         impl Node {
944                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
945                         GossipSync::P2P(self.p2p_gossip_sync.clone())
946                 }
947
948                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
949                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
950                 }
951
952                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
953                         GossipSync::None
954                 }
955         }
956
957         impl Drop for Node {
958                 fn drop(&mut self) {
959                         let data_dir = self.kv_store.get_data_dir();
960                         match fs::remove_dir_all(data_dir.clone()) {
961                                 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
962                                 _ => {}
963                         }
964                 }
965         }
966
	/// Test `KVStore` wrapper that can be configured to fail specific writes
	/// (manager / graph / scorer) and to signal when the graph is persisted.
	struct Persister {
		graph_error: Option<(std::io::ErrorKind, &'static str)>,    // injected failure for network-graph writes
		graph_persistence_notifier: Option<SyncSender<()>>,          // pinged on each network-graph write
		manager_error: Option<(std::io::ErrorKind, &'static str)>,  // injected failure for channel-manager writes
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,   // injected failure for scorer writes
		kv_store: FilesystemStore,                                   // real backing store for everything else
	}
974
975         impl Persister {
976                 fn new(data_dir: PathBuf) -> Self {
977                         let kv_store = FilesystemStore::new(data_dir);
978                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
979                 }
980
981                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
982                         Self { graph_error: Some((error, message)), ..self }
983                 }
984
985                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
986                         Self { graph_persistence_notifier: Some(sender), ..self }
987                 }
988
989                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
990                         Self { manager_error: Some((error, message)), ..self }
991                 }
992
993                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
994                         Self { scorer_error: Some((error, message)), ..self }
995                 }
996         }
997
	impl KVStore for Persister {
		// Reads pass straight through to the backing store.
		fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
			self.kv_store.read(primary_namespace, secondary_namespace, key)
		}

		// Writes intercept the three well-known keys to inject configured failures
		// (and, for the network graph, to notify a waiting test) before delegating
		// to the backing store.
		fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
			if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == CHANNEL_MANAGER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.manager_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == NETWORK_GRAPH_PERSISTENCE_KEY
			{
				// Notify first, then (optionally) fail: tests that inject a graph
				// error still observe the persistence attempt.
				if let Some(sender) = &self.graph_persistence_notifier {
					match sender.send(()) {
						Ok(()) => {},
						Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
					}
				};

				if let Some((error, message)) = self.graph_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == SCORER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.scorer_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
		}

		// Removals and listings pass straight through to the backing store.
		fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
			self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
		}

		fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
			self.kv_store.list(primary_namespace, secondary_namespace)
		}
	}
1049
	/// A scorer that, when expectations are set, asserts the exact sequence of
	/// score-update callbacks it receives; with no expectations it ignores updates.
	struct TestScorer {
		event_expectations: Option<VecDeque<TestResult>>,
	}

	/// The score-update callbacks a test may expect, in order.
	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}
1061
1062         impl TestScorer {
1063                 fn new() -> Self {
1064                         Self { event_expectations: None }
1065                 }
1066
1067                 fn expect(&mut self, expectation: TestResult) {
1068                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1069                 }
1070         }
1071
	// Serialization is a no-op: tests never need to round-trip the scorer's state.
	impl lightning::util::ser::Writeable for TestScorer {
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}

	// Lookup side of scoring is never exercised by these tests — only updates are.
	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
1082
	// Each callback pops the next queued expectation (panicking if the queue is
	// empty) and asserts it matches the callback that actually fired; any other
	// expectation kind is a test failure. With no expectations set, updates are
	// silently ignored.
	impl ScoreUpdate for TestScorer {
		fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn payment_path_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		// The failed short channel id is ignored; only the path is asserted on.
		fn probe_failed(&mut self, actual_path: &Path, _: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}
		fn probe_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						assert_eq!(actual_path, &path);
					}
				}
			}
		}
	}
1160
	#[cfg(c_bindings)]
	// Under `c_bindings` the combined `Score` trait must be implemented explicitly; its
	// requirements are provided by the scorer trait impls above, so the body is empty.
	impl lightning::routing::scoring::Score for TestScorer {}
1163
1164         impl Drop for TestScorer {
1165                 fn drop(&mut self) {
1166                         if std::thread::panicking() {
1167                                 return;
1168                         }
1169
1170                         if let Some(event_expectations) = &self.event_expectations {
1171                                 if !event_expectations.is_empty() {
1172                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1173                                 }
1174                         }
1175                 }
1176         }
1177
1178         fn get_full_filepath(filepath: String, filename: String) -> String {
1179                 let mut path = PathBuf::from(filepath);
1180                 path.push(filename);
1181                 path.to_str().unwrap().to_string()
1182         }
1183
	// Builds `num_nodes` fully-wired test nodes, each persisting under its own
	// `{persist_dir}_persister_{i}` subdirectory of the OS temp dir, then marks every
	// pair of nodes as connected so channel messages can flow between them. Returns the
	// temp persistence directory and the nodes.
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
			// Deterministic per-node seed so runs are reproducible.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			// The genesis timestamp serves as "now" for key-manager derivation.
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler {
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Register every pair of nodes as peer-connected (one side inbound, one outbound).
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
1231
	// Opens a channel from `$node_a` to `$node_b` for `$channel_value` sats, driving the
	// funding handshake until both sides have seen `Event::ChannelPending`. Evaluates to
	// the funding transaction. Note this consumes pending events on both nodes, so it
	// must run before a `BackgroundProcessor` starts handling events.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}
1246
	// Starts the open-channel handshake (open_channel -> accept_channel) between the two
	// nodes, leaving `$node_a` with a pending `FundingGenerationReady` event.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
1254
	// Destructures a `FundingGenerationReady` event (checking the channel value and the
	// user_channel_id of 42 set by `begin_open_channel!`) and builds a matching one-output
	// funding transaction. Evaluates to `(temporary_channel_id, funding_tx)`; panics on
	// any other event.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
1271
1272         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1273                 for i in 1..=depth {
1274                         let prev_blockhash = node.best_block.block_hash();
1275                         let height = node.best_block.height() + 1;
1276                         let header = create_dummy_header(prev_blockhash, height);
1277                         let txdata = vec![(0, tx)];
1278                         node.best_block = BestBlock::new(header.block_hash(), height);
1279                         match i {
1280                                 1 => {
1281                                         node.node.transactions_confirmed(&header, &txdata, height);
1282                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1283                                 },
1284                                 x if x == depth => {
1285                                         node.node.best_block_updated(&header, height);
1286                                         node.chain_monitor.best_block_updated(&header, height);
1287                                 },
1288                                 _ => {},
1289                         }
1290                 }
1291         }
	// Confirms `tx` and mines enough blocks on top for it to reach `ANTI_REORG_DELAY`
	// confirmations.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
1295
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Spins until the on-disk bytes at `$filepath` match a fresh serialization of
		// `$node`, i.e. until the background processor has persisted the latest state.
		// Re-serializes on every iteration since the state may change while waiting.
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		// Wait for the manager's persist/event signal to clear before the next step.
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		// Skip the clean shutdown when already panicking so stop()'s unwrap can't mask
		// the original test failure with a double panic.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1367
1368         #[test]
1369         fn test_timer_tick_called() {
1370                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1371                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1372                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1373                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1374                 let data_dir = nodes[0].kv_store.get_data_dir();
1375                 let persister = Arc::new(Persister::new(data_dir));
1376                 let event_handler = |_: _| {};
1377                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1378                 loop {
1379                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1380                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1381                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1382                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1383                         if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
1384                                 log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
1385                                 log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
1386                                 break
1387                         }
1388                 }
1389
1390                 if !std::thread::panicking() {
1391                         bg_processor.stop().unwrap();
1392                 }
1393         }
1394
1395         #[test]
1396         fn test_channel_manager_persist_error() {
1397                 // Test that if we encounter an error during manager persistence, the thread panics.
1398                 let (_, nodes) = create_nodes(2, "test_persist_error");
1399                 open_channel!(nodes[0], nodes[1], 100000);
1400
1401                 let data_dir = nodes[0].kv_store.get_data_dir();
1402                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1403                 let event_handler = |_: _| {};
1404                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1405                 match bg_processor.join() {
1406                         Ok(_) => panic!("Expected error persisting manager"),
1407                         Err(e) => {
1408                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1409                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1410                         },
1411                 }
1412         }
1413
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_channel_manager_persist_error_async() {
		// Test that if we encounter an error during manager persistence, the error is
		// returned by the background-processing future (no background thread here).
		let (_, nodes) = create_nodes(2, "test_persist_error_sync");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		// Persister rigged to fail manager persistence with a custom I/O error.
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				Box::pin(async move {
					tokio::time::sleep(dur).await;
					false // Never exit
				})
			}, false,
		);
		// The future should terminate with the injected persistence error.
		match bp_future.await {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
1442
1443         #[test]
1444         fn test_network_graph_persist_error() {
1445                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1446                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1447                 let data_dir = nodes[0].kv_store.get_data_dir();
1448                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1449                 let event_handler = |_: _| {};
1450                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1451
1452                 match bg_processor.stop() {
1453                         Ok(_) => panic!("Expected error persisting network graph"),
1454                         Err(e) => {
1455                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1456                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1457                         },
1458                 }
1459         }
1460
1461         #[test]
1462         fn test_scorer_persist_error() {
1463                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1464                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1465                 let data_dir = nodes[0].kv_store.get_data_dir();
1466                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1467                 let event_handler = |_: _| {};
1468                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1469
1470                 match bg_processor.stop() {
1471                         Ok(_) => panic!("Expected error persisting scorer"),
1472                         Err(e) => {
1473                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1474                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1475                         },
1476                 }
1477         }
1478
	#[test]
	fn test_background_event_handling() {
		// End-to-end check that the background processor forwards events to the supplied
		// handler: first `FundingGenerationReady`/`ChannelPending` during channel open,
		// then `SpendableOutputs` after a force-close matures.
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));

		// Set up a background event handler for FundingGenerationReady events.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
		// Relay the interesting events back to the test thread over channels.
		let event_handler = move |event: Event| match event {
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		// Stop the first processor before installing the second handler below.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Bury the commitment tx deep enough for the output to become claimable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		// Avoid a double panic if the test body already failed.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1552
1553         #[test]
1554         fn test_scorer_persistence() {
1555                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1556                 let data_dir = nodes[0].kv_store.get_data_dir();
1557                 let persister = Arc::new(Persister::new(data_dir));
1558                 let event_handler = |_: _| {};
1559                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1560
1561                 loop {
1562                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1563                         let expected_log = "Persisting scorer".to_string();
1564                         if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
1565                                 break
1566                         }
1567                 }
1568
1569                 if !std::thread::panicking() {
1570                         bg_processor.stop().unwrap();
1571                 }
1572         }
1573
	// Body shared by the sync and async variants of
	// test_not_pruning_network_graph_until_graph_sync_completion.
	//
	// Seeds the network graph with a single partially-announced channel, waits for the
	// background processor's main loop to run at least twice (without pruning it), then
	// applies a canned rapid-gossip-sync snapshot and checks that stale channels are only
	// pruned once the sync has completed and graph persistence has been notified.
	//
	// $nodes:   test nodes from create_nodes (indices 0 and 1 are used)
	// $receive: expression returning a Result, blocking until the graph-persistence
	//           notifier fires (e.g. recv_timeout or a polling loop)
	// $sleep:   expression that sleeps/yields briefly between log polls
	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			// Seed the graph with one channel (SCID 42) known only from a partial
			// announcement; this is the channel we expect to be pruned at the end.
			let features = ChannelFeatures::empty();
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

			// Poll the test logger until the timer_tick_occurred line has been logged
			// more than once, i.e. the background processor's loop has gone around at
			// least twice while the seeded channel is still present (no premature prune).
			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				if *log_entries.get(&("lightning_background_processor", loop_counter))
					.unwrap_or(&0) > 1
				{
					// Wait until the loop has gone around at least twice.
					break
				}
			}

			// Canned rapid-gossip-sync snapshot (note the "LDK\x01" prefix: 76, 68, 75, 1)
			// which announces two channels; see the assertion below.
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			// Apply the snapshot. The second argument is presumably the current unix time
			// used for staleness checks in no-std builds — TODO confirm against the
			// update_network_graph_no_std docs.
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

			// this should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			// Block until the background processor persists the graph post-sync.
			$receive.expect("Network graph not pruned within deadline");

			// all channels should now be pruned
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
1622
1623         #[test]
1624         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1625                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1626
1627                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1628                 let data_dir = nodes[0].kv_store.get_data_dir();
1629                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1630
1631                 let event_handler = |_: _| {};
1632                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1633
1634                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1635                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1636                         std::thread::sleep(Duration::from_millis(1)));
1637
1638                 background_processor.stop().unwrap();
1639         }
1640
1641         #[tokio::test]
1642         #[cfg(feature = "futures")]
1643         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1644                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1645
1646                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
1647                 let data_dir = nodes[0].kv_store.get_data_dir();
1648                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1649
1650                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1651                 let bp_future = super::process_events_async(
1652                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1653                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1654                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1655                                 let mut exit_receiver = exit_receiver.clone();
1656                                 Box::pin(async move {
1657                                         tokio::select! {
1658                                                 _ = tokio::time::sleep(dur) => false,
1659                                                 _ = exit_receiver.changed() => true,
1660                                         }
1661                                 })
1662                         }, false,
1663                 );
1664
1665                 let t1 = tokio::spawn(bp_future);
1666                 let t2 = tokio::spawn(async move {
1667                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1668                                 let mut i = 0;
1669                                 loop {
1670                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1671                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1672                                         assert!(i < 5);
1673                                         i += 1;
1674                                 }
1675                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1676                         exit_sender.send(()).unwrap();
1677                 });
1678                 let (r1, r2) = tokio::join!(t1, t2);
1679                 r1.unwrap().unwrap();
1680                 r2.unwrap()
1681         }
1682
	// Body shared by the sync and async variants of test_payment_path_scoring.
	//
	// Pushes a sequence of payment/probe events through node 0's ChannelManager and, via
	// the test scorer's expectation queue (`write_lock().expect(...)`), checks that the
	// background processor's event handling feeds each event into the scorer.
	//
	// $nodes:   test nodes (only index 0 is used)
	// $receive: expression returning a Result<Event, _>, blocking until the background
	//           processor's event handler forwards the next event
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

			// A single-hop path over the channel (scored_scid) that each event below should
			// cause the scorer to be told about.
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
				maybe_announced_channel: true,
			}], blinded_tail: None };

			// A non-permanent path failure should be scored as a payment failure at the
			// failing channel. `expect` arms the test scorer with the update we anticipate.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful payment path should be scored as a payment success.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful probe should be scored as a probe success.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful  { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A failed probe should be scored as a probe failure at the failing channel.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path,
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}
1775
1776         #[test]
1777         fn test_payment_path_scoring() {
1778                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1779                 let event_handler = move |event: Event| match event {
1780                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1781                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1782                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1783                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1784                         _ => panic!("Unexpected event: {:?}", event),
1785                 };
1786
1787                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1788                 let data_dir = nodes[0].kv_store.get_data_dir();
1789                 let persister = Arc::new(Persister::new(data_dir));
1790                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1791
1792                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1793
1794                 if !std::thread::panicking() {
1795                         bg_processor.stop().unwrap();
1796                 }
1797
1798                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1799                 let expected_log = "Persisting scorer after update".to_string();
1800                 assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
1801         }
1802
1803         #[tokio::test]
1804         #[cfg(feature = "futures")]
1805         async fn test_payment_path_scoring_async() {
1806                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1807                 let event_handler = move |event: Event| {
1808                         let sender_ref = sender.clone();
1809                         async move {
1810                                 match event {
1811                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1812                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1813                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1814                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1815                                         _ => panic!("Unexpected event: {:?}", event),
1816                                 }
1817                         }
1818                 };
1819
1820                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1821                 let data_dir = nodes[0].kv_store.get_data_dir();
1822                 let persister = Arc::new(Persister::new(data_dir));
1823
1824                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1825
1826                 let bp_future = super::process_events_async(
1827                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1828                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1829                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1830                                 let mut exit_receiver = exit_receiver.clone();
1831                                 Box::pin(async move {
1832                                         tokio::select! {
1833                                                 _ = tokio::time::sleep(dur) => false,
1834                                                 _ = exit_receiver.changed() => true,
1835                                         }
1836                                 })
1837                         }, false,
1838                 );
1839                 let t1 = tokio::spawn(bp_future);
1840                 let t2 = tokio::spawn(async move {
1841                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1842                         exit_sender.send(()).unwrap();
1843
1844                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1845                         let expected_log = "Persisting scorer after update".to_string();
1846                         assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
1847                 });
1848
1849                 let (r1, r2) = tokio::join!(t1, t2);
1850                 r1.unwrap().unwrap();
1851                 r2.unwrap()
1852         }
1853 }