]> git.bitcoin.ninja Git - rust-lightning/blob - lightning-background-processor/src/lib.rs
Pass Record by value to Logger
[rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
59 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
60 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
61 /// responsibilities are:
62 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
63 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
64 ///   writing it to disk/backups by invoking the callback given to it at startup.
65 ///   [`ChannelManager`] persistence should be done in the background.
66 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
67 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
68 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
69 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
70 ///
71 /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
72 /// upon as doing so may result in high latency.
73 ///
74 /// # Note
75 ///
76 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
77 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
78 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
79 /// unilateral chain closure fees are at risk.
80 ///
81 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
82 /// [`Event`]: lightning::events::Event
83 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
84 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Shared flag used to request that the background thread exit; presumably read as the
	// thread's loop-exit check -- confirm against the `start`/`stop` implementations.
	stop_thread: Arc<AtomicBool>,
	// Handle to the spawned processing thread. Wrapped in `Option` so the handle can be
	// taken out exactly once when joining; the thread's result carries any I/O error the
	// processing loop returned.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
91
// Interval (in seconds, cf. the hourly `NETWORK_PRUNE_TIMER` below) between
// `ChannelManager::timer_tick_occurred` calls in the run loop; shortened under `cfg(test)`
// so tests don't have to wait a full minute.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

// Interval (in seconds) between `PeerManager::timer_tick_occurred` calls.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

// Interval (in seconds) between persisting the scorer via the `Persister`.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

// Delay (in seconds) before the *first* network-graph prune after startup, so that
// short-lived clients still prune at least once; afterwards `NETWORK_PRUNE_TIMER` applies.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

// Interval (in seconds) between `ChainMonitor::rebroadcast_pending_claims` calls.
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;
125 #[cfg(feature = "futures")]
/// `core::cmp::min` is not currently a `const fn`, so roll our own trivial, equivalent
/// replacement usable in `const` initializers.
const fn min_u64(a: u64, b: u64) -> u64 {
	if b < a { b } else { a }
}
#[cfg(feature = "futures")]
// The minimum of the recurring timer intervals above (the hourly prune is covered via its
// shorter first-run interval). A sleep of this length can never skip past a timer's expiry;
// presumably used to bound sleep durations in the futures-based processor -- confirm
// against `process_events_async`.
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
131
/// Either [`P2PGossipSync`] or [`RapidGossipSync`] (or neither).
///
/// The chosen variant also affects when the backing [`NetworkGraph`] may be pruned: with
/// rapid gossip sync, pruning is deferred until the initial sync completes (see
/// `prunable_network_graph`).
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
180 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
181 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
182         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
183 where
184         U::Target: UtxoLookup,
185         L::Target: Logger,
186 {
187         /// Initializes a new [`GossipSync::P2P`] variant.
188         pub fn p2p(gossip_sync: P) -> Self {
189                 GossipSync::P2P(gossip_sync)
190         }
191 }
192
193 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
194 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
195         GossipSync<
196                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
197                 R,
198                 G,
199                 &'a (dyn UtxoLookup + Send + Sync),
200                 L,
201         >
202 where
203         L::Target: Logger,
204 {
205         /// Initializes a new [`GossipSync::Rapid`] variant.
206         pub fn rapid(gossip_sync: R) -> Self {
207                 GossipSync::Rapid(gossip_sync)
208         }
209 }
210
211 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
212 impl<'a, L: Deref>
213         GossipSync<
214                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
215                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
216                 &'a NetworkGraph<L>,
217                 &'a (dyn UtxoLookup + Send + Sync),
218                 L,
219         >
220 where
221         L::Target: Logger,
222 {
223         /// Initializes a new [`GossipSync::None`] variant.
224         pub fn none() -> Self {
225                 GossipSync::None
226         }
227 }
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
239 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
240 /// to persist.
241 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
242         scorer: &'a S, event: &Event
243 ) -> bool {
244         match event {
245                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
246                         let mut score = scorer.write_lock();
247                         score.payment_path_failed(path, *scid);
248                 },
249                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
250                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
251                         // because the payment made it all the way to the destination with sufficient liquidity.
252                         let mut score = scorer.write_lock();
253                         score.probe_successful(path);
254                 },
255                 Event::PaymentPathSuccessful { path, .. } => {
256                         let mut score = scorer.write_lock();
257                         score.payment_path_successful(path);
258                 },
259                 Event::ProbeSuccessful { path, .. } => {
260                         let mut score = scorer.write_lock();
261                         score.probe_successful(path);
262                 },
263                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
264                         let mut score = scorer.write_lock();
265                         score.probe_failed(path, *scid);
266                 },
267                 _ => return false,
268         }
269         true
270 }
271
// Expands to the shared background-processing main loop used by both the sync (`std`
// thread) and async (`futures`) entry points. The caller supplies:
//  * `$persister`/`$chain_monitor`/`$channel_manager`/`$gossip_sync`/`$peer_manager`/
//    `$logger`/`$scorer` -- local bindings for the LDK objects the loop drives;
//  * `$process_chain_monitor_events`/`$process_channel_manager_events` -- expressions which
//    drain pending events from the respective object;
//  * `$loop_exit_check` -- an expression which is `true` once shutdown has been requested;
//  * `$await` -- an expression which sleeps (up to ~100ms, per the comment below) or until
//    there is work to do;
//  * `$get_timer`/`$timer_elapsed` -- caller-supplied timer construction/expiry primitives
//    taking an interval in seconds;
//  * `$check_slow_await` -- whether to detect having been suspended mid-sleep and
//    disconnect peers if so (see the `await_slow` handling below).
// The expansion evaluates to a `Result<(), _>` and applies `?` to persistence failures, so
// it must appear in a function returning a compatible `Result`.
macro_rules! define_run_body {
	($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
	 $channel_manager: ident, $process_channel_manager_events: expr,
	 $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
	 $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
	 $check_slow_await: expr)
	=> { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		// One timer per periodic task; each is reset whenever its task runs.
		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					// Persistence failures here are logged rather than propagated: a missed
					// graph persist only costs a re-sync, unlike a missed manager persist.
					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
427
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	/// A future which resolves as soon as any one of its three sub-futures completes,
	/// reporting which one finished (and, for `c`, its boolean output).
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	/// Which of the [`Selector`] sub-futures completed first.
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			// Poll in a fixed order: if several sub-futures are ready at once, `a` wins
			// over `b`, which wins over `c`.
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	/// Builds a [`Waker`] whose wake/drop/clone hooks are all no-ops, for polling futures
	/// outside of a real async executor.
	// SAFETY: every vtable entry ignores its data pointer (clone returns another null-data
	// RawWaker and the rest do nothing), so the `RawWaker` contract holds trivially.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
476 #[cfg(feature = "futures")]
477 use futures_util::{Selector, SelectorOutput, dummy_waker};
478 #[cfg(feature = "futures")]
479 use core::task;
480
481 /// Processes background events in a future.
482 ///
483 /// `sleeper` should return a future which completes in the given amount of time and returns a
484 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
485 /// future which outputs `true`, the loop will exit and this function's future will complete.
486 /// The `sleeper` future is free to return early after it has triggered the exit condition.
487 ///
488 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
489 ///
490 /// Requires the `futures` feature. Note that while this method is available without the `std`
491 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
492 /// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
493 /// manually instead.
494 ///
495 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
496 /// mobile device, where we may need to check for interruption of the application regularly. If you
497 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
498 /// are hundreds or thousands of simultaneous process calls running.
499 ///
500 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
501 /// could set up `process_events_async` like this:
502 /// ```
503 /// # use lightning::io;
504 /// # use std::sync::{Arc, RwLock};
505 /// # use std::sync::atomic::{AtomicBool, Ordering};
506 /// # use lightning_background_processor::{process_events_async, GossipSync};
507 /// # struct MyStore {}
508 /// # impl lightning::util::persist::KVStore for MyStore {
509 /// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
510 /// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
511 /// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
512 /// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
513 /// # }
514 /// # struct MyEventHandler {}
515 /// # impl MyEventHandler {
516 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
517 /// # }
518 /// # #[derive(Eq, PartialEq, Clone, Hash)]
519 /// # struct MySocketDescriptor {}
520 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
521 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
522 /// #     fn disconnect_socket(&mut self) {}
523 /// # }
524 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
525 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
526 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
527 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
528 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
529 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
530 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
531 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
532 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
533 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
534 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
535 /// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
536 ///
537 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
538 ///     let background_persister = Arc::clone(&my_persister);
539 ///     let background_event_handler = Arc::clone(&my_event_handler);
540 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
541 ///     let background_chan_man = Arc::clone(&my_channel_manager);
542 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
543 ///     let background_peer_man = Arc::clone(&my_peer_manager);
544 ///     let background_logger = Arc::clone(&my_logger);
545 ///     let background_scorer = Arc::clone(&my_scorer);
546 ///
547 ///     // Setup the sleeper.
548 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
549 ///
550 ///     let sleeper = move |d| {
551 ///             let mut receiver = stop_receiver.clone();
552 ///             Box::pin(async move {
553 ///                     tokio::select!{
554 ///                             _ = tokio::time::sleep(d) => false,
555 ///                             _ = receiver.changed() => true,
556 ///                     }
557 ///             })
558 ///     };
559 ///
560 ///     let mobile_interruptable_platform = false;
561 ///
562 ///     let handle = tokio::spawn(async move {
563 ///             process_events_async(
564 ///                     background_persister,
565 ///                     |e| background_event_handler.handle_event(e),
566 ///                     background_chain_mon,
567 ///                     background_chan_man,
568 ///                     background_gossip_sync,
569 ///                     background_peer_man,
570 ///                     background_logger,
571 ///                     Some(background_scorer),
572 ///                     sleeper,
573 ///                     mobile_interruptable_platform,
574 ///                     )
575 ///                     .await
576 ///                     .expect("Failed to process events");
577 ///     });
578 ///
579 ///     // Stop the background processing.
580 ///     stop_sender.send(()).unwrap();
581 ///     handle.await.unwrap();
582 ///     # }
583 ///```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	APM: APeerManager + Send + Sync,
	PM: 'static + Deref<Target = APM> + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
	// Set to `true` once the caller-supplied `sleeper` future resolves with `true`
	// (i.e. the caller requested shutdown); checked by the run-loop macro below.
	let mut should_break = false;
	// Decorate the caller's event handler: before delegating each event, apply any
	// network-graph update it carries and feed it to the scorer, persisting the
	// scorer whenever `update_scorer` reports a change.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		// Capture these by reference so the closure can be called for many events.
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	// The shared run-loop macro drives event processing and periodic persistence;
	// the block below is its "wait for work or timeout" step, and the trailing
	// closures implement its timer primitives on top of `sleeper`.
	define_run_body!(persister,
		chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			// Wait until the channel manager or chain monitor has work for us, or
			// until the sleeper times out (a short 100ms tick on mobile so the
			// caller can interrupt us promptly).
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A|SelectorOutput::B => {},
				// The sleeper completed; it yields `true` when the caller wants us to exit.
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		// Non-blocking completion check for a sleep future: poll once with a dummy
		// waker and report readiness, recording any shutdown request it delivered.
		|fut: &mut SleepFuture, _| {
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform)
}
678
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		APM: APeerManager + Send + Sync,
		PM: 'static + Deref<Target = APM> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	{
		// Flag shared with the background thread; storing `true` asks its run loop to exit.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user's handler: apply any network-graph update carried by
			// the event and update (and, on change, persist) the scorer before
			// delegating to the caller-provided handler.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			// The shared run-loop macro drives event processing and periodic
			// persistence; here the "wait" step blocks (up to 100ms) on the channel
			// manager / chain monitor futures, and timers are measured with `Instant`.
			define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	// Signals the background thread to exit (Release pairs with the Acquire load in
	// the run loop), then waits for it to finish.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	// Waits for the background thread, if it is still running, and propagates its
	// persistence result. Panics if the thread itself panicked. Idempotent: returns
	// `Ok(())` when the thread was already joined.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
837
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Ask the background thread to stop and wait for it; a persistence error
		// surfaces as a panic here, since `drop` has no way to return it.
		let join_result = self.stop_and_join_thread();
		join_result.unwrap();
	}
}
844
845 #[cfg(all(feature = "std", test))]
846 mod tests {
847         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
848         use bitcoin::blockdata::locktime::absolute::LockTime;
849         use bitcoin::blockdata::transaction::{Transaction, TxOut};
850         use bitcoin::network::constants::Network;
851         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
852         use lightning::chain::{BestBlock, Confirm, chainmonitor};
853         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
854         use lightning::sign::{InMemorySigner, KeysManager};
855         use lightning::chain::transaction::OutPoint;
856         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
857         use lightning::{get_event_msg, get_event};
858         use lightning::ln::PaymentHash;
859         use lightning::ln::channelmanager;
860         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
861         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
862         use lightning::ln::functional_test_utils::*;
863         use lightning::ln::msgs::{ChannelMessageHandler, Init};
864         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
865         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
866         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
867         use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
868         use lightning::util::config::UserConfig;
869         use lightning::util::ser::Writeable;
870         use lightning::util::test_utils;
871         use lightning::util::persist::{KVStore,
872                 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
873                 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
874                 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
875         use lightning_persister::fs_store::FilesystemStore;
876         use std::collections::VecDeque;
877         use std::{fs, env};
878         use std::path::PathBuf;
879         use std::sync::{Arc, Mutex};
880         use std::sync::mpsc::SyncSender;
881         use std::time::Duration;
882         use lightning_rapid_gossip_sync::RapidGossipSync;
883         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
884
	// How long tests wait for an expected event before giving up: five freshness-timer periods.
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
886
	/// No-op socket descriptor used to satisfy the `PeerManager` type parameters in tests.
	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		// Always reports zero bytes written; no data is ever actually sent.
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			0
		}

		// Nothing to tear down for the dummy descriptor.
		fn disconnect_socket(&mut self) {}
	}
896
	// Lock wrapper for the test scorer: `c_bindings` builds use the bindings-compatible
	// multi-threaded lockable score, other builds a plain `Mutex`.
	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;
901
	// Concrete `ChannelManager` type used by these tests, with every generic parameter
	// filled in by test utilities.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;
918
	// Concrete `ChainMonitor` type used by these tests, persisting monitors via a `FilesystemStore`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
920
	// Shorthands for the P2P and rapid gossip-sync types shared across the tests below.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
923
	/// Everything needed to drive a background processor against one test node:
	/// the channel manager, both gossip-sync flavors, peer manager, chain monitor,
	/// on-disk store, and supporting test utilities.
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		kv_store: Arc<FilesystemStore>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>,
	}
937
938         impl Node {
939                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
940                         GossipSync::P2P(self.p2p_gossip_sync.clone())
941                 }
942
943                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
944                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
945                 }
946
947                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
948                         GossipSync::None
949                 }
950         }
951
952         impl Drop for Node {
953                 fn drop(&mut self) {
954                         let data_dir = self.kv_store.get_data_dir();
955                         match fs::remove_dir_all(data_dir.clone()) {
956                                 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
957                                 _ => {}
958                         }
959                 }
960         }
961
	/// Test persister wrapping a [`FilesystemStore`], with optional injected write
	/// failures per namespace and an optional channel notified on each network-graph
	/// persistence attempt.
	struct Persister {
		// If set, network-graph writes fail with this error.
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		// If set, receives `()` every time a network-graph write is attempted.
		graph_persistence_notifier: Option<SyncSender<()>>,
		// If set, channel-manager writes fail with this error.
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		// If set, scorer writes fail with this error.
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}
969
970         impl Persister {
971                 fn new(data_dir: PathBuf) -> Self {
972                         let kv_store = FilesystemStore::new(data_dir);
973                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
974                 }
975
976                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
977                         Self { graph_error: Some((error, message)), ..self }
978                 }
979
980                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
981                         Self { graph_persistence_notifier: Some(sender), ..self }
982                 }
983
984                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
985                         Self { manager_error: Some((error, message)), ..self }
986                 }
987
988                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
989                         Self { scorer_error: Some((error, message)), ..self }
990                 }
991         }
992
993         impl KVStore for Persister {
994                 fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
995                         self.kv_store.read(primary_namespace, secondary_namespace, key)
996                 }
997
998                 fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
999                         if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
1000                                 secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
1001                                 key == CHANNEL_MANAGER_PERSISTENCE_KEY
1002                         {
1003                                 if let Some((error, message)) = self.manager_error {
1004                                         return Err(std::io::Error::new(error, message))
1005                                 }
1006                         }
1007
1008                         if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
1009                                 secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
1010                                 key == NETWORK_GRAPH_PERSISTENCE_KEY
1011                         {
1012                                 if let Some(sender) = &self.graph_persistence_notifier {
1013                                         match sender.send(()) {
1014                                                 Ok(()) => {},
1015                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1016                                         }
1017                                 };
1018
1019                                 if let Some((error, message)) = self.graph_error {
1020                                         return Err(std::io::Error::new(error, message))
1021                                 }
1022                         }
1023
1024                         if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
1025                                 secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
1026                                 key == SCORER_PERSISTENCE_KEY
1027                         {
1028                                 if let Some((error, message)) = self.scorer_error {
1029                                         return Err(std::io::Error::new(error, message))
1030                                 }
1031                         }
1032
1033                         self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
1034                 }
1035
1036                 fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1037                         self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
1038                 }
1039
1040                 fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
1041                         self.kv_store.list(primary_namespace, secondary_namespace)
1042                 }
1043         }
1044
	/// Scorer stub that records the expected sequence of score updates; `None` means
	/// updates are ignored rather than checked.
	struct TestScorer {
		event_expectations: Option<VecDeque<TestResult>>,
	}
1048
	/// One expected score-update call, paired with the path (and, for payment
	/// failures, the short channel id) it should carry.
	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}
1056
1057         impl TestScorer {
1058                 fn new() -> Self {
1059                         Self { event_expectations: None }
1060                 }
1061
1062                 fn expect(&mut self, expectation: TestResult) {
1063                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1064                 }
1065         }
1066
	impl lightning::util::ser::Writeable for TestScorer {
		// Serialization is a no-op: these tests never round-trip the scorer's state.
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}
1070
	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		// Penalty lookups are never exercised by these tests, so this is left unimplemented.
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
1077
1078         impl ScoreUpdate for TestScorer {
1079                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
1080                         if let Some(expectations) = &mut self.event_expectations {
1081                                 match expectations.pop_front().unwrap() {
1082                                         TestResult::PaymentFailure { path, short_channel_id } => {
1083                                                 assert_eq!(actual_path, &path);
1084                                                 assert_eq!(actual_short_channel_id, short_channel_id);
1085                                         },
1086                                         TestResult::PaymentSuccess { path } => {
1087                                                 panic!("Unexpected successful payment path: {:?}", path)
1088                                         },
1089                                         TestResult::ProbeFailure { path } => {
1090                                                 panic!("Unexpected probe failure: {:?}", path)
1091                                         },
1092                                         TestResult::ProbeSuccess { path } => {
1093                                                 panic!("Unexpected probe success: {:?}", path)
1094                                         }
1095                                 }
1096                         }
1097                 }
1098
1099                 fn payment_path_successful(&mut self, actual_path: &Path) {
1100                         if let Some(expectations) = &mut self.event_expectations {
1101                                 match expectations.pop_front().unwrap() {
1102                                         TestResult::PaymentFailure { path, .. } => {
1103                                                 panic!("Unexpected payment path failure: {:?}", path)
1104                                         },
1105                                         TestResult::PaymentSuccess { path } => {
1106                                                 assert_eq!(actual_path, &path);
1107                                         },
1108                                         TestResult::ProbeFailure { path } => {
1109                                                 panic!("Unexpected probe failure: {:?}", path)
1110                                         },
1111                                         TestResult::ProbeSuccess { path } => {
1112                                                 panic!("Unexpected probe success: {:?}", path)
1113                                         }
1114                                 }
1115                         }
1116                 }
1117
1118                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
1119                         if let Some(expectations) = &mut self.event_expectations {
1120                                 match expectations.pop_front().unwrap() {
1121                                         TestResult::PaymentFailure { path, .. } => {
1122                                                 panic!("Unexpected payment path failure: {:?}", path)
1123                                         },
1124                                         TestResult::PaymentSuccess { path } => {
1125                                                 panic!("Unexpected payment path success: {:?}", path)
1126                                         },
1127                                         TestResult::ProbeFailure { path } => {
1128                                                 assert_eq!(actual_path, &path);
1129                                         },
1130                                         TestResult::ProbeSuccess { path } => {
1131                                                 panic!("Unexpected probe success: {:?}", path)
1132                                         }
1133                                 }
1134                         }
1135                 }
1136                 fn probe_successful(&mut self, actual_path: &Path) {
1137                         if let Some(expectations) = &mut self.event_expectations {
1138                                 match expectations.pop_front().unwrap() {
1139                                         TestResult::PaymentFailure { path, .. } => {
1140                                                 panic!("Unexpected payment path failure: {:?}", path)
1141                                         },
1142                                         TestResult::PaymentSuccess { path } => {
1143                                                 panic!("Unexpected payment path success: {:?}", path)
1144                                         },
1145                                         TestResult::ProbeFailure { path } => {
1146                                                 panic!("Unexpected probe failure: {:?}", path)
1147                                         },
1148                                         TestResult::ProbeSuccess { path } => {
1149                                                 assert_eq!(actual_path, &path);
1150                                         }
1151                                 }
1152                         }
1153                 }
1154         }
1155
	// NOTE(review): under c_bindings the scoring traits appear to be unified behind a single
	// `Score` trait; this empty impl presumably just opts TestScorer into it (the real methods
	// come from the ScoreLookUp/ScoreUpdate impls above) — confirm against the scoring module.
	#[cfg(c_bindings)]
	impl lightning::routing::scoring::Score for TestScorer {}
1158
1159         impl Drop for TestScorer {
1160                 fn drop(&mut self) {
1161                         if std::thread::panicking() {
1162                                 return;
1163                         }
1164
1165                         if let Some(event_expectations) = &self.event_expectations {
1166                                 if !event_expectations.is_empty() {
1167                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1168                                 }
1169                         }
1170                 }
1171         }
1172
1173         fn get_full_filepath(filepath: String, filename: String) -> String {
1174                 let mut path = PathBuf::from(filepath);
1175                 path.push(filename);
1176                 path.to_str().unwrap().to_string()
1177         }
1178
	// Spins up `num_nodes` fully-stocked test `Node`s (ChannelManager, ChainMonitor,
	// PeerManager, both gossip syncs, per-node filesystem store, scorer, ...) persisting under a
	// per-test temp directory, then connects every pair of nodes to each other at the
	// ChannelManager level. Returns the persistence directory path and the nodes.
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			// Tag each node's logger with its index so tests can grep per-node log lines.
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
			// Deterministic per-node seed, reused for both the router RNG and the key material.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			// Each node persists into its own `<dir>_persister_<i>` subdirectory.
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			// Channel/routing handlers are real test stubs; onion/custom messages are ignored.
			let msg_handler = MessageHandler {
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Mark every pair of nodes as connected so channel messages can flow between them.
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
1226
	// Opens and funds a channel from `$node_a` to `$node_b`, driving the
	// funding_created/funding_signed exchange until both sides have seen
	// `Event::ChannelPending`. Evaluates to the funding transaction.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			// Exactly one event — the FundingGenerationReady for the new channel.
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}
1241
	// Kicks off a channel open from `$node_a` to `$node_b`: creates the channel (user channel id
	// 42) and exchanges open_channel/accept_channel, stopping just before funding generation.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
1249
	// Destructures a `FundingGenerationReady` event, asserting the requested channel value and
	// the user channel id 42 set by `begin_open_channel!`, then builds a funding transaction
	// with a single output paying `output_script`. Evaluates to
	// `(temporary_channel_id, funding_tx)`.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					// No real inputs are needed for these tests — only the output matters.
					let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
1266
1267         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1268                 for i in 1..=depth {
1269                         let prev_blockhash = node.best_block.block_hash();
1270                         let height = node.best_block.height() + 1;
1271                         let header = create_dummy_header(prev_blockhash, height);
1272                         let txdata = vec![(0, tx)];
1273                         node.best_block = BestBlock::new(header.block_hash(), height);
1274                         match i {
1275                                 1 => {
1276                                         node.node.transactions_confirmed(&header, &txdata, height);
1277                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1278                                 },
1279                                 x if x == depth => {
1280                                         node.node.best_block_updated(&header, height);
1281                                         node.chain_monitor.best_block_updated(&header, height);
1282                                 },
1283                                 _ => {},
1284                         }
1285                 }
1286         }
	// Confirms `tx` on `node` and buries it under `ANTI_REORG_DELAY` blocks.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
1290
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Busy-waits until the bytes on disk at `$filepath` match a fresh serialization of
		// `$node`. Loops (re-serializing each pass) because the background processor persists
		// concurrently and may be mid-write or behind the latest state.
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								// File may not exist yet or be mid-write; retry.
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		// Wait until the manager's needs-persist/event signal clears before poking it again.
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		// Avoid a double panic: stop() unwraps, so skip it if an assertion already failed.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1362
1363         #[test]
1364         fn test_timer_tick_called() {
1365                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1366                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1367                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1368                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1369                 let data_dir = nodes[0].kv_store.get_data_dir();
1370                 let persister = Arc::new(Persister::new(data_dir));
1371                 let event_handler = |_: _| {};
1372                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1373                 loop {
1374                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1375                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1376                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1377                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1378                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1379                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1380                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1381                                 break
1382                         }
1383                 }
1384
1385                 if !std::thread::panicking() {
1386                         bg_processor.stop().unwrap();
1387                 }
1388         }
1389
1390         #[test]
1391         fn test_channel_manager_persist_error() {
1392                 // Test that if we encounter an error during manager persistence, the thread panics.
1393                 let (_, nodes) = create_nodes(2, "test_persist_error");
1394                 open_channel!(nodes[0], nodes[1], 100000);
1395
1396                 let data_dir = nodes[0].kv_store.get_data_dir();
1397                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1398                 let event_handler = |_: _| {};
1399                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1400                 match bg_processor.join() {
1401                         Ok(_) => panic!("Expected error persisting manager"),
1402                         Err(e) => {
1403                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1404                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1405                         },
1406                 }
1407         }
1408
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_channel_manager_persist_error_async() {
		// Async analogue of test_channel_manager_persist_error: a manager-persistence failure
		// should resolve the processing future with that error.
		let (_, nodes) = create_nodes(2, "test_persist_error_sync");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				// Sleeper: naps for the requested duration and reports "don't exit" each time.
				Box::pin(async move {
					tokio::time::sleep(dur).await;
					false // Never exit
				})
			}, false,
			// NOTE(review): the trailing `false` is presumably the mobile-interruptable-platform
			// flag — confirm against `process_events_async`'s signature.
		);
		// With the persister rigged to fail, the future should finish with the injected error
		// rather than running forever.
		match bp_future.await {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
1437
1438         #[test]
1439         fn test_network_graph_persist_error() {
1440                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1441                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1442                 let data_dir = nodes[0].kv_store.get_data_dir();
1443                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1444                 let event_handler = |_: _| {};
1445                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1446
1447                 match bg_processor.stop() {
1448                         Ok(_) => panic!("Expected error persisting network graph"),
1449                         Err(e) => {
1450                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1451                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1452                         },
1453                 }
1454         }
1455
1456         #[test]
1457         fn test_scorer_persist_error() {
1458                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1459                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1460                 let data_dir = nodes[0].kv_store.get_data_dir();
1461                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1462                 let event_handler = |_: _| {};
1463                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1464
1465                 match bg_processor.stop() {
1466                         Ok(_) => panic!("Expected error persisting scorer"),
1467                         Err(e) => {
1468                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1469                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1470                         },
1471                 }
1472         }
1473
	#[test]
	fn test_background_event_handling() {
		// End-to-end check that events generated in the background (FundingGenerationReady,
		// ChannelPending, SpendableOutputs) are delivered to the user-supplied event handler.
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));

		// Set up a background event handler for FundingGenerationReady events.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			// Forward the (temporary_channel_id, funding_tx) pair to the test thread.
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		// Node 0's ChannelPending should arrive via the background handler, not get_event!.
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		// Avoid a double panic: stop() unwraps, so skip it if we're already unwinding.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Bury the commitment tx deep enough for the to_self output to become spendable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1547
1548         #[test]
1549         fn test_scorer_persistence() {
1550                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1551                 let data_dir = nodes[0].kv_store.get_data_dir();
1552                 let persister = Arc::new(Persister::new(data_dir));
1553                 let event_handler = |_: _| {};
1554                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1555
1556                 loop {
1557                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1558                         let expected_log = "Persisting scorer".to_string();
1559                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1560                                 break
1561                         }
1562                 }
1563
1564                 if !std::thread::panicking() {
1565                         bg_processor.stop().unwrap();
1566                 }
1567         }
1568
	// Shared body for the sync and async variants of
	// `test_not_pruning_network_graph_until_graph_sync_completion` below.
	//
	// `$nodes` is the test-node array (at least two nodes required), `$receive` is an
	// expression that blocks until the persister reports the network graph was persisted
	// (or times out), and `$sleep` is an expression that briefly sleeps/yields between
	// polls of the test logger.
	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			// Seed the graph with a single channel (SCID 42) from a partial announcement.
			// The assertions below verify it is NOT pruned until the rapid gossip sync
			// completes, and IS pruned afterwards.
			let features = ChannelFeatures::empty();
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

			// Spin until the background processor has logged at least two timer ticks,
			// i.e. its main loop has gone around (and thus had the chance to prune)
			// while gossip sync was still incomplete. The seeded channel surviving past
			// this point is the "not pruning until sync completion" half of the test.
			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
					.unwrap_or(&0) > 1
				{
					// Wait until the loop has gone around at least twice.
					break
				}
			}

			// Canned rapid-gossip-sync snapshot bytes; applying it below completes the
			// gossip sync and announces two channels.
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			// NOTE(review): the fixed timestamp appears to be passed as the "current time"
			// (no-std variant takes it explicitly) so the snapshot data is considered
			// fresh — confirm against `RapidGossipSync::update_network_graph_no_std` docs.
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

			// this should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			// With sync complete the processor may prune; wait until the persister signals
			// that the pruned graph was persisted.
			$receive.expect("Network graph not pruned within deadline");

			// all channels should now be pruned
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
1617
1618         #[test]
1619         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1620                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1621
1622                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1623                 let data_dir = nodes[0].kv_store.get_data_dir();
1624                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1625
1626                 let event_handler = |_: _| {};
1627                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1628
1629                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1630                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1631                         std::thread::sleep(Duration::from_millis(1)));
1632
1633                 background_processor.stop().unwrap();
1634         }
1635
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
		// Async twin of `test_not_pruning_network_graph_until_graph_sync_completion`;
		// the persister signals graph persistence over this channel.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);

		let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

		// Watch channel used to ask the background-processor future to exit.
		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					// Custom sleeper: sleep for `dur`, but return `true` (requesting
					// shutdown) as soon as the exit signal arrives instead.
					tokio::select! {
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);

		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
				// `$receive` expression: poll (non-blocking) for the persistence
				// notification once per prune-timer period, failing after five
				// periods without one.
				let mut i = 0;
				loop {
					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
					assert!(i < 5);
					i += 1;
				}
			}, tokio::time::sleep(Duration::from_millis(1)).await);
			exit_sender.send(()).unwrap();
		});
		// Both the processor future and the test driver must complete cleanly.
		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap()
	}
1677
	// Shared body for the sync and async variants of `test_payment_path_scoring` below.
	//
	// `$nodes` is the test-node array and `$receive` is an expression that blocks until
	// the event handler has seen the next event (or times out). Each step first registers
	// the scorer update expected for the event via the test scorer's `expect`, then pushes
	// the event and waits for it to be handled.
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

			// A one-hop path over the channel we expect to be scored.
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
				maybe_announced_channel: true,
			}], blinded_tail: None };

			// A non-permanent path failure should be scored as a payment failure on the
			// failing channel.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful payment path should be scored as a payment success.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful probe should be scored as a probe success.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful  { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A failed probe should be scored as a probe failure.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path,
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}
1770
1771         #[test]
1772         fn test_payment_path_scoring() {
1773                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1774                 let event_handler = move |event: Event| match event {
1775                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1776                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1777                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1778                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1779                         _ => panic!("Unexpected event: {:?}", event),
1780                 };
1781
1782                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1783                 let data_dir = nodes[0].kv_store.get_data_dir();
1784                 let persister = Arc::new(Persister::new(data_dir));
1785                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1786
1787                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1788
1789                 if !std::thread::panicking() {
1790                         bg_processor.stop().unwrap();
1791                 }
1792
1793                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1794                 let expected_log = "Persisting scorer after update".to_string();
1795                 assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1796         }
1797
1798         #[tokio::test]
1799         #[cfg(feature = "futures")]
1800         async fn test_payment_path_scoring_async() {
1801                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1802                 let event_handler = move |event: Event| {
1803                         let sender_ref = sender.clone();
1804                         async move {
1805                                 match event {
1806                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1807                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1808                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1809                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1810                                         _ => panic!("Unexpected event: {:?}", event),
1811                                 }
1812                         }
1813                 };
1814
1815                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1816                 let data_dir = nodes[0].kv_store.get_data_dir();
1817                 let persister = Arc::new(Persister::new(data_dir));
1818
1819                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1820
1821                 let bp_future = super::process_events_async(
1822                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1823                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1824                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1825                                 let mut exit_receiver = exit_receiver.clone();
1826                                 Box::pin(async move {
1827                                         tokio::select! {
1828                                                 _ = tokio::time::sleep(dur) => false,
1829                                                 _ = exit_receiver.changed() => true,
1830                                         }
1831                                 })
1832                         }, false,
1833                 );
1834                 let t1 = tokio::spawn(bp_future);
1835                 let t2 = tokio::spawn(async move {
1836                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1837                         exit_sender.send(()).unwrap();
1838
1839                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1840                         let expected_log = "Persisting scorer after update".to_string();
1841                         assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1842                 });
1843
1844                 let (r1, r2) = tokio::join!(t1, t2);
1845                 r1.unwrap().unwrap();
1846                 r2.unwrap()
1847         }
1848 }