// Process OnionMessageHandler events in background
// [rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
/// upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Shared flag used to signal the background thread to exit its processing loop
	// (presumably set when shutdown is requested, e.g. on drop per the `must_use` note —
	// the setter lives outside this chunk).
	stop_thread: Arc<AtomicBool>,
	// Handle to the spawned background thread; `Option` so it can be taken when joined,
	// carrying through any persistence I/O error the loop exited with.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
91
// How frequently we call `ChannelManager::timer_tick_occurred`, in seconds (drastically
// shortened under `cfg(test)` so tests don't have to wait a full minute per tick).
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

// How frequently we call `PeerManager::timer_tick_occurred`, in seconds.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

// How frequently the scorer is persisted, in seconds.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

// Delay before the first network-graph prune after startup, in seconds. This is much shorter
// than `NETWORK_PRUNE_TIMER` so short-lived clients still prune at least once.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

// How frequently we call `ChainMonitor::rebroadcast_pending_claims`, in seconds.
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;
124
#[cfg(feature = "futures")]
/// `core::cmp::min` cannot yet be used in `const` contexts, so roll our own trivially
/// equivalent replacement.
const fn min_u64(a: u64, b: u64) -> u64 { if b < a { b } else { a } }
/// The shortest of all the loop timers above — the async processor need never sleep longer
/// than this between wakeups.
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(
	min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)),
);
131
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
///
/// Type parameters: `P` derefs to a [`P2PGossipSync`] (used by the [`GossipSync::P2P`]
/// variant), `R` derefs to a [`RapidGossipSync`] (used by [`GossipSync::Rapid`]), `G` is the
/// [`NetworkGraph`] handle shared by both sync types, `U` is the [`UtxoLookup`] required by
/// [`P2PGossipSync`], and `L` is the [`Logger`].
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
	GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::P2P`] variant.
	///
	/// No rapid gossip sync is involved, so the `R` parameter is pinned to a concrete
	/// reference type here purely to satisfy the enum's generics.
	pub fn p2p(gossip_sync: P) -> Self {
		GossipSync::P2P(gossip_sync)
	}
}
192
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
	GossipSync<
		&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
		R,
		G,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::Rapid`] variant.
	///
	/// No P2P gossip sync is involved, so the `P` and `U` parameters are pinned to concrete
	/// reference/trait-object types here purely to satisfy the enum's generics.
	pub fn rapid(gossip_sync: R) -> Self {
		GossipSync::Rapid(gossip_sync)
	}
}
210
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
	GossipSync<
		&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
		&RapidGossipSync<&'a NetworkGraph<L>, L>,
		&'a NetworkGraph<L>,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::None`] variant.
	///
	/// With no gossip sync at all, every generic slot is pinned to a concrete
	/// reference/trait-object type purely to satisfy the enum's generics.
	pub fn none() -> Self {
		GossipSync::None
	}
}
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
239 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
240 /// to persist.
241 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
242         scorer: &'a S, event: &Event
243 ) -> bool {
244         match event {
245                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
246                         let mut score = scorer.write_lock();
247                         score.payment_path_failed(path, *scid);
248                 },
249                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
250                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
251                         // because the payment made it all the way to the destination with sufficient liquidity.
252                         let mut score = scorer.write_lock();
253                         score.probe_successful(path);
254                 },
255                 Event::PaymentPathSuccessful { path, .. } => {
256                         let mut score = scorer.write_lock();
257                         score.payment_path_successful(path);
258                 },
259                 Event::ProbeSuccessful { path, .. } => {
260                         let mut score = scorer.write_lock();
261                         score.probe_successful(path);
262                 },
263                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
264                         let mut score = scorer.write_lock();
265                         score.probe_failed(path, *scid);
266                 },
267                 _ => return false,
268         }
269         true
270 }
271
// Shared body of the background-processing loop, expanded by both the blocking `std` thread
// implementation and the `futures`-based async implementation. The caller supplies:
//  - `$persister`/`$chain_monitor`/`$channel_manager`/`$peer_manager`/`$gossip_sync`/
//    `$logger`/`$scorer`: identifiers of the objects being driven,
//  - `$process_*_events`: expressions that drain each object's pending events,
//  - `$loop_exit_check`: an expression that evaluates to `true` once shutdown was requested,
//  - `$await`: an expression that sleeps briefly or waits for a wakeup,
//  - `$get_timer`/`$timer_elapsed`: the time-source abstraction used for all interval checks,
//  - `$check_slow_await`: whether to detect the process having been suspended mid-await.
macro_rules! define_run_body {
	(
		$persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
		$channel_manager: ident, $process_channel_manager_events: expr,
		$peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
		$logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
		$timer_elapsed: expr, $check_slow_await: expr
	) => { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		// Start every interval timer fresh; each is reset after its action fires below.
		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			// Drain pending events from each event source, including onion message handler
			// events, before giving the PeerManager a chance to run.
			$process_channel_manager_events;
			$process_chain_monitor_events;
			$process_onion_message_handler_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// Persist the ChannelManager whenever it flags that its state changed since the
			// last persist.
			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					// Persistence failures are logged rather than propagated: a failed graph
					// write should not tear down the whole background processor.
					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
429
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	// Races three sub-futures, resolving as soon as any of them completes. The `Unpin`
	// bounds let `poll` below re-pin the fields with `Pin::new` safely.
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	// Which of the three sub-futures finished first; `C` carries its boolean output.
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		// Polls `a`, then `b`, then `c`, returning the first that is ready — so `a` wins ties
		// and earlier futures are preferred when several are ready at once.
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	// SAFETY: the RawWakerVTable contract is trivially upheld — every vtable entry either
	// returns a fresh null-data RawWaker or is a no-op, and the null data pointer is never
	// dereferenced, so wake/wake_by_ref/drop/clone are all sound on this waker.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
478 #[cfg(feature = "futures")]
479 use futures_util::{Selector, SelectorOutput, dummy_waker};
480 #[cfg(feature = "futures")]
481 use core::task;
482
483 /// Processes background events in a future.
484 ///
485 /// `sleeper` should return a future which completes in the given amount of time and returns a
486 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
487 /// future which outputs `true`, the loop will exit and this function's future will complete.
488 /// The `sleeper` future is free to return early after it has triggered the exit condition.
489 ///
490 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
491 ///
492 /// Requires the `futures` feature. Note that while this method is available without the `std`
493 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
494 /// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
495 /// manually instead.
496 ///
497 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
498 /// mobile device, where we may need to check for interruption of the application regularly. If you
499 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
500 /// are hundreds or thousands of simultaneous process calls running.
501 ///
502 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
503 /// could setup `process_events_async` like this:
504 /// ```
505 /// # use lightning::io;
506 /// # use std::sync::{Arc, RwLock};
507 /// # use std::sync::atomic::{AtomicBool, Ordering};
508 /// # use lightning_background_processor::{process_events_async, GossipSync};
509 /// # struct MyStore {}
510 /// # impl lightning::util::persist::KVStore for MyStore {
511 /// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
512 /// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
513 /// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
514 /// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
515 /// # }
516 /// # struct MyEventHandler {}
517 /// # impl MyEventHandler {
518 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
519 /// # }
520 /// # #[derive(Eq, PartialEq, Clone, Hash)]
521 /// # struct MySocketDescriptor {}
522 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
523 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
524 /// #     fn disconnect_socket(&mut self) {}
525 /// # }
526 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
527 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
528 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
529 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
530 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
531 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
532 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
533 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
534 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
535 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
536 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
537 /// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
538 ///
539 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
540 ///     let background_persister = Arc::clone(&my_persister);
541 ///     let background_event_handler = Arc::clone(&my_event_handler);
542 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
543 ///     let background_chan_man = Arc::clone(&my_channel_manager);
544 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
545 ///     let background_peer_man = Arc::clone(&my_peer_manager);
546 ///     let background_logger = Arc::clone(&my_logger);
547 ///     let background_scorer = Arc::clone(&my_scorer);
548 ///
549 ///     // Setup the sleeper.
550 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
551 ///
552 ///     let sleeper = move |d| {
553 ///             let mut receiver = stop_receiver.clone();
554 ///             Box::pin(async move {
555 ///                     tokio::select!{
556 ///                             _ = tokio::time::sleep(d) => false,
557 ///                             _ = receiver.changed() => true,
558 ///                     }
559 ///             })
560 ///     };
561 ///
562 ///     let mobile_interruptable_platform = false;
563 ///
564 ///     let handle = tokio::spawn(async move {
565 ///             process_events_async(
566 ///                     background_persister,
567 ///                     |e| background_event_handler.handle_event(e),
568 ///                     background_chain_mon,
569 ///                     background_chan_man,
570 ///                     background_gossip_sync,
571 ///                     background_peer_man,
572 ///                     background_logger,
573 ///                     Some(background_scorer),
574 ///                     sleeper,
575 ///                     mobile_interruptable_platform,
576 ///                     )
577 ///                     .await
578 ///                     .expect("Failed to process events");
579 ///     });
580 ///
581 ///     // Stop the background processing.
582 ///     stop_sender.send(()).unwrap();
583 ///     handle.await.unwrap();
584 ///     # }
585 ///```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	PM: 'static + Deref + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	PM::Target: APeerManager + Send + Sync,
{
	// Set when the sleeper future resolves `true`, i.e. the caller asked us to exit.
	let mut should_break = false;
	// Decorate the user-supplied event handler: before forwarding each event we (a) apply any
	// network-graph update the event carries (if a gossip sync with a graph was provided) and
	// (b) feed payment/probe results to the scorer, persisting it right after a successful
	// update so a crash cannot lose the new scores.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	// Shared main-loop macro: drives event processing for the chain monitor, channel manager
	// and onion message handler, plus periodic persistence/pruning. The block argument below
	// is the per-iteration "sleep until something needs doing" step.
	define_run_body!(
		persister, chain_monitor,
		chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
		gossip_sync, logger, scorer, should_break, {
			// Wake on whichever comes first: the channel manager needing service, the chain
			// monitor having updates, or the timer (100ms on mobile so interruption is
			// prompt, else the fastest maintenance timer).
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A|SelectorOutput::B => {},
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			// Poll an in-flight sleep future exactly once with a no-op waker; report it done
			// only if already ready, recording any exit request it returned.
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform
	)
}
683
#[cfg(feature = "futures")]
async fn process_onion_message_handler_events_async<
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PM: 'static + Deref + Send + Sync,
>(
	peer_manager: &PM, handler: EventHandler
)
where
	PM::Target: APeerManager + Send + Sync,
{
	use lightning::events::EventsProvider;

	// `process_pending_events` only accepts an immutable `Fn` callback, so buffer the
	// events through a `RefCell` before handing them to the (possibly async) handler.
	let buffered = core::cell::RefCell::new(Vec::new());
	peer_manager.onion_message_handler().process_pending_events(
		&|evt| buffered.borrow_mut().push(evt)
	);

	// Drive the async handler for each buffered event, preserving arrival order.
	for evt in buffered.into_inner() {
		handler(evt).await;
	}
}
704
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		PM: 'static + Deref + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
		PM::Target: APeerManager + Send + Sync,
	{
		// Shared flag used by `stop`/`Drop` to ask the background thread to exit.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user's handler: apply any network-graph update the event
			// carries and persist the scorer immediately after a successful score update,
			// then forward the event.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			// Shared main loop: process pending events from the chain monitor, channel
			// manager and onion message handler, persisting/pruning on timers, until
			// `stop_thread` is set. Each iteration blocks for at most 100ms.
			define_run_body!(
				persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				peer_manager,
				peer_manager.onion_message_handler().process_pending_events(&event_handler),
				gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false
			)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		// Signal the background thread to exit, then wait for it to finish.
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		// `take()` ensures we join at most once; subsequent calls are no-ops.
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
867
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Stop and join the background thread on drop; a persistence error surfaces as
		// a panic here rather than being silently discarded.
		self.stop_and_join_thread().unwrap();
	}
}
874
875 #[cfg(all(feature = "std", test))]
876 mod tests {
877         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
878         use bitcoin::blockdata::locktime::absolute::LockTime;
879         use bitcoin::blockdata::transaction::{Transaction, TxOut};
880         use bitcoin::network::constants::Network;
881         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
882         use lightning::chain::{BestBlock, Confirm, chainmonitor};
883         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
884         use lightning::sign::{InMemorySigner, KeysManager};
885         use lightning::chain::transaction::OutPoint;
886         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
887         use lightning::{get_event_msg, get_event};
888         use lightning::ln::PaymentHash;
889         use lightning::ln::channelmanager;
890         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
891         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
892         use lightning::ln::functional_test_utils::*;
893         use lightning::ln::msgs::{ChannelMessageHandler, Init};
894         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
895         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
896         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
897         use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
898         use lightning::util::config::UserConfig;
899         use lightning::util::ser::Writeable;
900         use lightning::util::test_utils;
901         use lightning::util::persist::{KVStore,
902                 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
903                 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
904                 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
905         use lightning_persister::fs_store::FilesystemStore;
906         use std::collections::VecDeque;
907         use std::{fs, env};
908         use std::path::PathBuf;
909         use std::sync::{Arc, Mutex};
910         use std::sync::mpsc::SyncSender;
911         use std::time::Duration;
912         use lightning_rapid_gossip_sync::RapidGossipSync;
913         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
914
	// Bound used by tests when waiting for expected events, scaled from FRESHNESS_TIMER.
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;

	// Minimal no-op socket descriptor for building a `PeerManager` in tests.
	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			// Report that no bytes were written.
			0
		}

		fn disconnect_socket(&mut self) {}
	}
926
	// Lock wrapper around the test scorer: the c_bindings build requires the
	// multi-threaded lockable wrapper; otherwise a plain `Mutex` suffices.
	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;
931
	// Concrete `ChannelManager` instantiation used throughout these tests.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;

	// Chain monitor persisting channel monitors via a real `FilesystemStore`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;

	// Shorthands for the two gossip-sync flavors exercised by the tests.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
953
	// Bundle of all the LDK components making up a single test node.
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		// Backing store; its directory is removed when the `Node` is dropped.
		kv_store: Arc<FilesystemStore>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>,
	}
967
	impl Node {
		// `GossipSync` selecting this node's P2P gossip sync.
		fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::P2P(self.p2p_gossip_sync.clone())
		}

		// `GossipSync` selecting this node's rapid gossip sync.
		fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::Rapid(self.rapid_gossip_sync.clone())
		}

		// `GossipSync` selecting no gossip sync at all.
		fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::None
		}
	}
981
982         impl Drop for Node {
983                 fn drop(&mut self) {
984                         let data_dir = self.kv_store.get_data_dir();
985                         match fs::remove_dir_all(data_dir.clone()) {
986                                 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
987                                 _ => {}
988                         }
989                 }
990         }
991
	// `KVStore` wrapper over a real `FilesystemStore` that can be configured to fail
	// (or emit a notification) on specific persistence operations.
	struct Persister {
		// If set, writes of the network graph fail with this error.
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		// If set, a message is sent on this channel each time the graph is persisted.
		graph_persistence_notifier: Option<SyncSender<()>>,
		// If set, writes of the channel manager fail with this error.
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		// If set, writes of the scorer fail with this error.
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}
999
1000         impl Persister {
1001                 fn new(data_dir: PathBuf) -> Self {
1002                         let kv_store = FilesystemStore::new(data_dir);
1003                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
1004                 }
1005
1006                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1007                         Self { graph_error: Some((error, message)), ..self }
1008                 }
1009
1010                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
1011                         Self { graph_persistence_notifier: Some(sender), ..self }
1012                 }
1013
1014                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1015                         Self { manager_error: Some((error, message)), ..self }
1016                 }
1017
1018                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1019                         Self { scorer_error: Some((error, message)), ..self }
1020                 }
1021         }
1022
1023         impl KVStore for Persister {
1024                 fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
1025                         self.kv_store.read(primary_namespace, secondary_namespace, key)
1026                 }
1027
1028                 fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
1029                         if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
1030                                 secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
1031                                 key == CHANNEL_MANAGER_PERSISTENCE_KEY
1032                         {
1033                                 if let Some((error, message)) = self.manager_error {
1034                                         return Err(std::io::Error::new(error, message))
1035                                 }
1036                         }
1037
1038                         if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
1039                                 secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
1040                                 key == NETWORK_GRAPH_PERSISTENCE_KEY
1041                         {
1042                                 if let Some(sender) = &self.graph_persistence_notifier {
1043                                         match sender.send(()) {
1044                                                 Ok(()) => {},
1045                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1046                                         }
1047                                 };
1048
1049                                 if let Some((error, message)) = self.graph_error {
1050                                         return Err(std::io::Error::new(error, message))
1051                                 }
1052                         }
1053
1054                         if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
1055                                 secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
1056                                 key == SCORER_PERSISTENCE_KEY
1057                         {
1058                                 if let Some((error, message)) = self.scorer_error {
1059                                         return Err(std::io::Error::new(error, message))
1060                                 }
1061                         }
1062
1063                         self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
1064                 }
1065
1066                 fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1067                         self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
1068                 }
1069
1070                 fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
1071                         self.kv_store.list(primary_namespace, secondary_namespace)
1072                 }
1073         }
1074
	// Scorer that asserts score updates arrive in the exact expected order.
	struct TestScorer {
		// FIFO of expected updates; `None` means no particular expectations are set.
		event_expectations: Option<VecDeque<TestResult>>,
	}

	// One expected score-update call, with the arguments it must carry.
	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}

	impl TestScorer {
		fn new() -> Self {
			Self { event_expectations: None }
		}

		// Queue an expectation; consumed in order by the `ScoreUpdate` impl below.
		fn expect(&mut self, expectation: TestResult) {
			self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
		}
	}
1096
	impl lightning::util::ser::Writeable for TestScorer {
		// Persisting the test scorer writes nothing and always succeeds.
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}
1100
	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		// The lookup side of the scorer is never exercised by these tests.
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
1107
1108         impl ScoreUpdate for TestScorer {
		// Pops the next expectation (if any are set) and requires it to be a
		// `PaymentFailure` matching both the path and the failing SCID.
		fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}
1128
		// Pops the next expectation (if any are set) and requires it to be a
		// `PaymentSuccess` with a matching path.
		fn payment_path_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}
1147
1148                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
1149                         if let Some(expectations) = &mut self.event_expectations {
1150                                 match expectations.pop_front().unwrap() {
1151                                         TestResult::PaymentFailure { path, .. } => {
1152                                                 panic!("Unexpected payment path failure: {:?}", path)
1153                                         },
1154                                         TestResult::PaymentSuccess { path } => {
1155                                                 panic!("Unexpected payment path success: {:?}", path)
1156                                         },
1157                                         TestResult::ProbeFailure { path } => {
1158                                                 assert_eq!(actual_path, &path);
1159                                         },
1160                                         TestResult::ProbeSuccess { path } => {
1161                                                 panic!("Unexpected probe success: {:?}", path)
1162                                         }
1163                                 }
1164                         }
1165                 }
1166                 fn probe_successful(&mut self, actual_path: &Path) {
1167                         if let Some(expectations) = &mut self.event_expectations {
1168                                 match expectations.pop_front().unwrap() {
1169                                         TestResult::PaymentFailure { path, .. } => {
1170                                                 panic!("Unexpected payment path failure: {:?}", path)
1171                                         },
1172                                         TestResult::PaymentSuccess { path } => {
1173                                                 panic!("Unexpected payment path success: {:?}", path)
1174                                         },
1175                                         TestResult::ProbeFailure { path } => {
1176                                                 panic!("Unexpected probe failure: {:?}", path)
1177                                         },
1178                                         TestResult::ProbeSuccess { path } => {
1179                                                 assert_eq!(actual_path, &path);
1180                                         }
1181                                 }
1182                         }
1183                 }
1184         }
1185
	#[cfg(c_bindings)]
	// Under c_bindings the combined `Score` trait must be implemented explicitly;
	// the body is empty since TestScorer already provides the lookup/update
	// methods. NOTE(review): confirm against the c_bindings `Score` definition.
	impl lightning::routing::scoring::Score for TestScorer {}
1188
1189         impl Drop for TestScorer {
1190                 fn drop(&mut self) {
1191                         if std::thread::panicking() {
1192                                 return;
1193                         }
1194
1195                         if let Some(event_expectations) = &self.event_expectations {
1196                                 if !event_expectations.is_empty() {
1197                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1198                                 }
1199                         }
1200                 }
1201         }
1202
1203         fn get_full_filepath(filepath: String, filename: String) -> String {
1204                 let mut path = PathBuf::from(filepath);
1205                 path.push(filename);
1206                 path.to_str().unwrap().to_string()
1207         }
1208
	/// Builds `num_nodes` fully-wired test nodes, each persisting to its own
	/// subdirectory of a temp dir derived from `persist_dir`, then marks every
	/// pair of nodes as connected to each other. Returns the resolved
	/// persistence directory and the nodes.
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
			// Deterministic per-node seed keeps keys/router behavior reproducible.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler {
				// NOTE(review): the channel message handler is constructed for
				// Testnet while everything else above uses Bitcoin — confirm
				// this mismatch is intentional for these tests.
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Register every pair of nodes as connected peers (in both directions)
		// so peer-driven logic sees live connections.
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
1256
	// Opens and funds a channel of `$channel_value` sats from `$node_a` to
	// `$node_b`, driving both sides through the funding handshake until each
	// has observed its `ChannelPending` event. Evaluates to the funding
	// transaction.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			// Build the funding tx from the FundingGenerationReady event...
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			// ...then exchange funding_created/funding_signed between the peers.
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}
1271
	// Starts the channel-open handshake: `$node_a` initiates a channel to
	// `$node_b` and the open_channel/accept_channel messages are exchanged,
	// leaving `$node_a` ready to generate funding.
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
1279
	// Destructures a `FundingGenerationReady` event, asserts it matches the
	// requested `$channel_value` (and the fixed user_channel_id of 42 used by
	// begin_open_channel!), and evaluates to the temporary channel id plus a
	// one-output funding transaction paying the requested script.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
1296
1297         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1298                 for i in 1..=depth {
1299                         let prev_blockhash = node.best_block.block_hash();
1300                         let height = node.best_block.height() + 1;
1301                         let header = create_dummy_header(prev_blockhash, height);
1302                         let txdata = vec![(0, tx)];
1303                         node.best_block = BestBlock::new(header.block_hash(), height);
1304                         match i {
1305                                 1 => {
1306                                         node.node.transactions_confirmed(&header, &txdata, height);
1307                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1308                                 },
1309                                 x if x == depth => {
1310                                         node.node.best_block_updated(&header, height);
1311                                         node.chain_monitor.best_block_updated(&header, height);
1312                                 },
1313                                 _ => {},
1314                         }
1315                 }
1316         }
	/// Confirms `tx` to a depth of `ANTI_REORG_DELAY` blocks, i.e. deep enough
	/// that the monitor treats the confirmation as irreversible.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
1320
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Polls until the file at `$filepath` matches the current serialization of
		// `$node` — i.e. until the background processor has persisted the latest state.
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									// Keep spinning until the on-disk copy catches up.
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		// Wait for the background processor to drain the pending event/persist
		// notification before triggering the next update.
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		// Only stop cleanly when the test passed; stopping during a panic could
		// mask the original failure.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1392
	#[test]
	fn test_timer_tick_called() {
		// Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
		// `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
		// `PeerManager::timer_tick_occurred` every `PING_TIMER`.
		let (_, nodes) = create_nodes(1, "test_timer_tick_called");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		// Busy-wait until the background processor has logged evidence of all
		// three timer-driven calls. The test logger records each line keyed by
		// (module, message).
		loop {
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
			let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
			let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
			if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
				log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
				log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
				break
			}
		}

		// Avoid a double panic if the test is already failing.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1419
1420         #[test]
1421         fn test_channel_manager_persist_error() {
1422                 // Test that if we encounter an error during manager persistence, the thread panics.
1423                 let (_, nodes) = create_nodes(2, "test_persist_error");
1424                 open_channel!(nodes[0], nodes[1], 100000);
1425
1426                 let data_dir = nodes[0].kv_store.get_data_dir();
1427                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1428                 let event_handler = |_: _| {};
1429                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1430                 match bg_processor.join() {
1431                         Ok(_) => panic!("Expected error persisting manager"),
1432                         Err(e) => {
1433                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1434                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1435                         },
1436                 }
1437         }
1438
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_channel_manager_persist_error_async() {
		// Test that if we encounter an error during manager persistence, the thread panics.
		let (_, nodes) = create_nodes(2, "test_persist_error_sync");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		// Inject a manager-persistence failure for the async processor to hit.
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				Box::pin(async move {
					tokio::time::sleep(dur).await;
					false // Never exit
				})
			}, false,
		);
		// The injected error should be the future's resolution.
		match bp_future.await {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
1467
1468         #[test]
1469         fn test_network_graph_persist_error() {
1470                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1471                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1472                 let data_dir = nodes[0].kv_store.get_data_dir();
1473                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1474                 let event_handler = |_: _| {};
1475                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1476
1477                 match bg_processor.stop() {
1478                         Ok(_) => panic!("Expected error persisting network graph"),
1479                         Err(e) => {
1480                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1481                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1482                         },
1483                 }
1484         }
1485
1486         #[test]
1487         fn test_scorer_persist_error() {
1488                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1489                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1490                 let data_dir = nodes[0].kv_store.get_data_dir();
1491                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1492                 let event_handler = |_: _| {};
1493                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1494
1495                 match bg_processor.stop() {
1496                         Ok(_) => panic!("Expected error persisting scorer"),
1497                         Err(e) => {
1498                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1499                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1500                         },
1501                 }
1502         }
1503
	#[test]
	fn test_background_event_handling() {
		// Verifies that events generated while the BackgroundProcessor is running
		// (FundingGenerationReady, ChannelPending, SpendableOutputs) are routed to
		// the caller-supplied event handler.
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));

		// Set up a background event handler for FundingGenerationReady events.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		// Avoid a double panic if the test already failed.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Confirm past the to_self delay so the output becomes spendable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1577
1578         #[test]
1579         fn test_scorer_persistence() {
1580                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1581                 let data_dir = nodes[0].kv_store.get_data_dir();
1582                 let persister = Arc::new(Persister::new(data_dir));
1583                 let event_handler = |_: _| {};
1584                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1585
1586                 loop {
1587                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1588                         let expected_log = "Persisting scorer".to_string();
1589                         if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
1590                                 break
1591                         }
1592                 }
1593
1594                 if !std::thread::panicking() {
1595                         bg_processor.stop().unwrap();
1596                 }
1597         }
1598
	// Shared body for the sync/async "don't prune until gossip sync completes" tests.
	// $nodes: test nodes (node 0 runs the background processor and owns the graph);
	// $receive: expression blocking (with a deadline) until the graph-persistence
	//           notifier installed on node 0's persister fires;
	// $sleep: expression that yields briefly between log polls (thread or async sleep).
	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			let features = ChannelFeatures::empty();
			// Seed the graph with one channel announcement (SCID 42) that receives no
			// further updates, so a prune pass would remove it.
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				if *log_entries.get(&("lightning_background_processor", loop_counter))
					.unwrap_or(&0) > 1
				{
					// Wait until the loop has gone around at least twice.
					break
				}
			}

			// Serialized rapid-gossip-sync snapshot (see lightning-rapid-gossip-sync for
			// the binary format) carrying two channel announcements.
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

			// this should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			// Wait for the post-sync prune pass to persist the graph.
			$receive.expect("Network graph not pruned within deadline");

			// all channels should now be pruned
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
1647
1648         #[test]
1649         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1650                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1651
1652                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1653                 let data_dir = nodes[0].kv_store.get_data_dir();
1654                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1655
1656                 let event_handler = |_: _| {};
1657                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1658
1659                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1660                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1661                         std::thread::sleep(Duration::from_millis(1)));
1662
1663                 background_processor.stop().unwrap();
1664         }
1665
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
		// Same pruning scenario, but driving `process_events_async` on tokio instead of
		// a dedicated background thread.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);

		let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

		// The watch channel lets the test task tell the sleeper closure to return
		// `true`, which signals the background future to exit.
		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					tokio::select! {
						// false => keep running; true => shut down.
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);

		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
				// Poll the notifier for up to ~5 prune timers before giving up.
				let mut i = 0;
				loop {
					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
					assert!(i < 5);
					i += 1;
				}
			}, tokio::time::sleep(Duration::from_millis(1)).await);
			exit_sender.send(()).unwrap();
		});
		// Both the background future and the test task must complete cleanly.
		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap()
	}
1707
	// Shared body for the sync/async scorer-update tests. $nodes: test nodes (node 0
	// runs the background processor); $receive: expression blocking (with a deadline)
	// until the event handler forwards the next event. Each step arms the test scorer
	// with the exact update it expects, pushes the corresponding event, and waits for
	// the handler to see it.
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

			// Single-hop path through the channel we'll score.
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
				maybe_announced_channel: true,
			}], blinded_tail: None };

			// A non-permanent path failure with a failing SCID scores as a payment failure.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful payment over the path scores as a payment success.
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful probe scores as a probe success.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful  { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A failed probe scores as a probe failure.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path,
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}
1800
1801         #[test]
1802         fn test_payment_path_scoring() {
1803                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1804                 let event_handler = move |event: Event| match event {
1805                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1806                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1807                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1808                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1809                         _ => panic!("Unexpected event: {:?}", event),
1810                 };
1811
1812                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1813                 let data_dir = nodes[0].kv_store.get_data_dir();
1814                 let persister = Arc::new(Persister::new(data_dir));
1815                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1816
1817                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1818
1819                 if !std::thread::panicking() {
1820                         bg_processor.stop().unwrap();
1821                 }
1822
1823                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1824                 let expected_log = "Persisting scorer after update".to_string();
1825                 assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
1826         }
1827
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_payment_path_scoring_async() {
		// Async variant of the scoring test: the event handler is an async closure
		// forwarding events over a tokio mpsc channel to the test task.
		let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
		let event_handler = move |event: Event| {
			let sender_ref = sender.clone();
			async move {
				match event {
					Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
					Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
					Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
					Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
					_ => panic!("Unexpected event: {:?}", event),
				}
			}
		};

		let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));

		// Used by the test task to tell the sleeper closure (and thus the background
		// future) to shut down once the scenario completes.
		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());

		let bp_future = super::process_events_async(
			persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					tokio::select! {
						// false => keep running; true => shut down.
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);
		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_payment_path_scoring!(nodes, receiver.recv().await);
			exit_sender.send(()).unwrap();

			// The macro pushes five scorer-updating events, so expect five
			// persist-after-update log lines.
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let expected_log = "Persisting scorer after update".to_string();
			assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
		});

		// Both the background future and the test task must complete cleanly.
		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap()
	}
1878 }