Ensure `background-processor` exits after any sleep future says to
[rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
68 ///   at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be
73 /// relied upon as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take a lot longer without compiler optimisations.
99 /// Increasing the ping timer allows for this, but slower devices will be disconnected if the
100 /// timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(feature = "futures")]
120 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
121 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
122 #[cfg(feature = "futures")]
123 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
124         min_u64(SCORER_PERSIST_TIMER, FIRST_NETWORK_PRUNE_TIMER));
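// For reference: in a non-test release build this works out to min(min(60, 10), min(30, 60)) =
// 10 seconds, i.e. the longest the sleep future handed to `process_events_async` is asked to
// run between loop iterations on non-mobile platforms.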
125
126 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
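///
/// Typically constructed via [`GossipSync::p2p`], [`GossipSync::rapid`], or [`GossipSync::none`]
/// rather than by naming the (heavily generic) variants directly. A minimal sketch, ignored in
/// doctests, where `p2p_sync` is a hypothetical, already-constructed `Arc<P2PGossipSync<..>>`:
///
/// ```ignore
/// let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_sync));
/// ```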
127 pub enum GossipSync<
128         P: Deref<Target = P2PGossipSync<G, U, L>>,
129         R: Deref<Target = RapidGossipSync<G, L>>,
130         G: Deref<Target = NetworkGraph<L>>,
131         U: Deref,
132         L: Deref,
133 >
134 where U::Target: UtxoLookup, L::Target: Logger {
135         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
136         P2P(P),
137         /// Rapid gossip sync from a trusted server.
138         Rapid(R),
139         /// No gossip sync.
140         None,
141 }
142
143 impl<
144         P: Deref<Target = P2PGossipSync<G, U, L>>,
145         R: Deref<Target = RapidGossipSync<G, L>>,
146         G: Deref<Target = NetworkGraph<L>>,
147         U: Deref,
148         L: Deref,
149 > GossipSync<P, R, G, U, L>
150 where U::Target: UtxoLookup, L::Target: Logger {
151         fn network_graph(&self) -> Option<&G> {
152                 match self {
153                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
154                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
155                         GossipSync::None => None,
156                 }
157         }
158
159         fn prunable_network_graph(&self) -> Option<&G> {
160                 match self {
161                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
162                         GossipSync::Rapid(gossip_sync) => {
163                                 if gossip_sync.is_initial_sync_complete() {
164                                         Some(gossip_sync.network_graph())
165                                 } else {
166                                         None
167                                 }
168                         },
169                         GossipSync::None => None,
170                 }
171         }
172 }
173
174 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
175 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
176         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
177 where
178         U::Target: UtxoLookup,
179         L::Target: Logger,
180 {
181         /// Initializes a new [`GossipSync::P2P`] variant.
182         pub fn p2p(gossip_sync: P) -> Self {
183                 GossipSync::P2P(gossip_sync)
184         }
185 }
186
187 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
188 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
189         GossipSync<
190                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
191                 R,
192                 G,
193                 &'a (dyn UtxoLookup + Send + Sync),
194                 L,
195         >
196 where
197         L::Target: Logger,
198 {
199         /// Initializes a new [`GossipSync::Rapid`] variant.
200         pub fn rapid(gossip_sync: R) -> Self {
201                 GossipSync::Rapid(gossip_sync)
202         }
203 }
204
205 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
206 impl<'a, L: Deref>
207         GossipSync<
208                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
209                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
210                 &'a NetworkGraph<L>,
211                 &'a (dyn UtxoLookup + Send + Sync),
212                 L,
213         >
214 where
215         L::Target: Logger,
216 {
217         /// Initializes a new [`GossipSync::None`] variant.
218         pub fn none() -> Self {
219                 GossipSync::None
220         }
221 }
222
223 fn handle_network_graph_update<L: Deref>(
224         network_graph: &NetworkGraph<L>, event: &Event
225 ) where L::Target: Logger {
226         if let Event::PaymentPathFailed {
227                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
228         {
229                 network_graph.handle_network_update(upd);
230         }
231 }
232
233 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
234         scorer: &'a S, event: &Event
235 ) {
236         let mut score = scorer.lock();
237         match event {
238                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
239                         let path = path.iter().collect::<Vec<_>>();
240                         score.payment_path_failed(&path, *scid);
241                 },
242                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
243                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
244                         // because the payment made it all the way to the destination with sufficient liquidity.
245                         let path = path.iter().collect::<Vec<_>>();
246                         score.probe_successful(&path);
247                 },
248                 Event::PaymentPathSuccessful { path, .. } => {
249                         let path = path.iter().collect::<Vec<_>>();
250                         score.payment_path_successful(&path);
251                 },
252                 Event::ProbeSuccessful { path, .. } => {
253                         let path = path.iter().collect::<Vec<_>>();
254                         score.probe_successful(&path);
255                 },
256                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
257                         let path = path.iter().collect::<Vec<_>>();
258                         score.probe_failed(&path, *scid);
259                 },
260                 _ => {},
261         }
262 }
263
264 macro_rules! define_run_body {
265         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
266          $channel_manager: ident, $process_channel_manager_events: expr,
267          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
268          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
269          $check_slow_await: expr)
270         => { {
271                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
272                 $channel_manager.timer_tick_occurred();
273
274                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
275                 let mut last_ping_call = $get_timer(PING_TIMER);
276                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
277                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
278                 let mut have_pruned = false;
279
280                 loop {
281                         $process_channel_manager_events;
282                         $process_chain_monitor_events;
283
284                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
285                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
286                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
287                         // without running the normal event processing above and handing events to users.
288                         //
289                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
290                         // processing a message effectively at any point during this loop. In order to
291                         // minimize the time between such processing completing and persisting the updated
292                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
293                         // generally, and as a fallback place such blocking only immediately before
294                         // persistence.
295                         $peer_manager.process_events();
296
297                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
298                         // see `await_start`'s use below.
299                         let mut await_start = None;
300                         if $check_slow_await { await_start = Some($get_timer(1)); }
301                         let updates_available = $await;
302                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
303
304                         if updates_available {
305                                 log_trace!($logger, "Persisting ChannelManager...");
306                                 $persister.persist_manager(&*$channel_manager)?;
307                                 log_trace!($logger, "Done persisting ChannelManager.");
308                         }
309                         // Exit the loop if the background processor was requested to stop.
310                         if $loop_exit_check {
311                                 log_trace!($logger, "Terminating background processor.");
312                                 break;
313                         }
314                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
315                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
316                                 $channel_manager.timer_tick_occurred();
317                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
318                         }
319                         if await_slow {
320                                 // On various platforms, we may be starved of CPU cycles for several reasons.
321                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
322                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
323                                 // may not get any cycles.
324                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
325                                 // full second, at which point we assume sockets may have been killed (they
326                                 // appear to be at least on some platforms, even if it has only been a second).
327                                 // Note that we have to take care to not get here just because user event
328                                 // processing was slow at the top of the loop. For example, the sample client
329                                 // may call Bitcoin Core RPCs during event handling, which very often takes
330                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
331                                 // peers.
332                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
333                                 $peer_manager.disconnect_all_peers();
334                                 last_ping_call = $get_timer(PING_TIMER);
335                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
336                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
337                                 $peer_manager.timer_tick_occurred();
338                                 last_ping_call = $get_timer(PING_TIMER);
339                         }
340
341                         // Note that we want to run a graph prune once, not long after startup, before
342                         // falling back to our usual hourly prunes. This avoids short-lived clients never
343                         // pruning their network graph. We run the first prune 60 seconds after startup,
344                         // then continue on our normal cadence.
345                         if $timer_elapsed(&mut last_prune_call, if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }) {
346                                 // The network graph must not be pruned while rapid sync completion is pending
347                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
348                                         #[cfg(feature = "std")] {
349                                                 log_trace!($logger, "Pruning and persisting network graph.");
350                                                 network_graph.remove_stale_channels_and_tracking();
351                                         }
352                                         #[cfg(not(feature = "std"))] {
353                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
354                                                 log_trace!($logger, "Persisting network graph.");
355                                         }
356
357                                         if let Err(e) = $persister.persist_graph(network_graph) {
358                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
359                                         }
360
361                                         have_pruned = true;
362                                 }
363                                 last_prune_call = $get_timer(NETWORK_PRUNE_TIMER);
364                         }
365
366                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
367                                 if let Some(ref scorer) = $scorer {
368                                         log_trace!($logger, "Persisting scorer");
369                                         if let Err(e) = $persister.persist_scorer(&scorer) {
370                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
371                                         }
372                                 }
373                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
374                         }
375                 }
376
377                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
378                 // some races where users quit while channel updates were in-flight, with
379                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
380                 $persister.persist_manager(&*$channel_manager)?;
381
382                 // Persist Scorer on exit
383                 if let Some(ref scorer) = $scorer {
384                         $persister.persist_scorer(&scorer)?;
385                 }
386
387                 // Persist NetworkGraph on exit
388                 if let Some(network_graph) = $gossip_sync.network_graph() {
389                         $persister.persist_graph(network_graph)?;
390                 }
391
392                 Ok(())
393         } }
394 }
395
396 #[cfg(feature = "futures")]
397 pub(crate) mod futures_util {
398         use core::future::Future;
399         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
400         use core::pin::Pin;
401         use core::marker::Unpin;
402         pub(crate) struct Selector<
403                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
404         > {
405                 pub a: A,
406                 pub b: B,
407                 pub c: C,
408         }
409         pub(crate) enum SelectorOutput {
410                 A, B, C(bool),
411         }
412
413         impl<
414                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
415         > Future for Selector<A, B, C> {
416                 type Output = SelectorOutput;
417                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
418                         match Pin::new(&mut self.a).poll(ctx) {
419                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
420                                 Poll::Pending => {},
421                         }
422                         match Pin::new(&mut self.b).poll(ctx) {
423                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
424                                 Poll::Pending => {},
425                         }
426                         match Pin::new(&mut self.c).poll(ctx) {
427                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
428                                 Poll::Pending => {},
429                         }
430                         Poll::Pending
431                 }
432         }
433
434         // If we want to poll a future without an async context, to figure out whether it has
435         // completed without awaiting it, we need a Waker, which needs a vtable. We fill the vtable
436         // with dummy values, but sadly there's a good bit of boilerplate here.
437         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
438         fn dummy_waker_action(_: *const ()) { }
439
440         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
441                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
442         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
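
        // A small sanity test, added for illustration: the dummy waker above lets us poll a future
        // exactly once outside of any async context. The future here is a trivial, hypothetical one
        // that is ready on its first poll.
        #[cfg(test)]
        mod dummy_waker_test {
                use super::dummy_waker;
                use core::future::Future;
                use core::pin::Pin;
                use core::task::{Context, Poll};

                #[test]
                fn polls_ready_future_with_dummy_waker() {
                        // Box the future so it is `Unpin` and can be polled via `Pin::new`.
                        let mut fut = Box::pin(async { 42u32 });
                        let waker = dummy_waker();
                        let mut ctx = Context::from_waker(&waker);
                        match Pin::new(&mut fut).poll(&mut ctx) {
                                Poll::Ready(v) => assert_eq!(v, 42),
                                Poll::Pending => panic!("a trivial future should be ready on its first poll"),
                        }
                }
        }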
443 }
444 #[cfg(feature = "futures")]
445 use futures_util::{Selector, SelectorOutput, dummy_waker};
446 #[cfg(feature = "futures")]
447 use core::task;
448
449 /// Processes background events in a future.
450 ///
451 /// `sleeper` should return a future which completes in the given amount of time and returns a
452 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
453 /// future which outputs true, the loop will exit and this function's future will complete.
454 ///
455 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
456 ///
457 /// Requires the `futures` feature. Note that while this method is available without the `std`
458 /// feature, running it without `std` skips calling
459 /// [`NetworkGraph::remove_stale_channels_and_tracking`]; you should instead call
460 /// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] yourself at regular intervals.
461 ///
462 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
463 /// mobile device, where we may need to check for interruption of the application regularly. If you
464 /// are unsure, you should set the flag, as its performance impact is minimal unless there are
465 /// hundreds or thousands of simultaneous process calls running.
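///
/// # Example `sleeper`
///
/// A minimal sketch (ignored in doctests) of one way to satisfy the `sleeper` contract described
/// above, assuming a Tokio runtime and a hypothetical `stop_receiver`
/// (`tokio::sync::watch::Receiver<()>`) that is signalled on shutdown; this is not LDK-provided
/// code:
///
/// ```ignore
/// let sleeper = move |d: core::time::Duration| {
///     let mut stop = stop_receiver.clone();
///     Box::pin(async move {
///         tokio::select! {
///             _ = tokio::time::sleep(d) => false, // timed out; keep processing
///             _ = stop.changed() => true,         // shutdown requested; exit the loop
///         }
///     })
/// };
/// ```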
466 #[cfg(feature = "futures")]
467 pub async fn process_events_async<
468         'a,
469         UL: 'static + Deref + Send + Sync,
470         CF: 'static + Deref + Send + Sync,
471         CW: 'static + Deref + Send + Sync,
472         T: 'static + Deref + Send + Sync,
473         ES: 'static + Deref + Send + Sync,
474         NS: 'static + Deref + Send + Sync,
475         SP: 'static + Deref + Send + Sync,
476         F: 'static + Deref + Send + Sync,
477         R: 'static + Deref + Send + Sync,
478         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
479         L: 'static + Deref + Send + Sync,
480         P: 'static + Deref + Send + Sync,
481         Descriptor: 'static + SocketDescriptor + Send + Sync,
482         CMH: 'static + Deref + Send + Sync,
483         RMH: 'static + Deref + Send + Sync,
484         OMH: 'static + Deref + Send + Sync,
485         EventHandlerFuture: core::future::Future<Output = ()>,
486         EventHandler: Fn(Event) -> EventHandlerFuture,
487         PS: 'static + Deref + Send,
488         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
489         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
490         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
491         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
492         UMH: 'static + Deref + Send + Sync,
493         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
494         S: 'static + Deref<Target = SC> + Send + Sync,
495         SC: for<'b> WriteableScore<'b>,
496         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
497         Sleeper: Fn(Duration) -> SleepFuture
498 >(
499         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
500         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
501         sleeper: Sleeper, mobile_interruptable_platform: bool,
502 ) -> Result<(), lightning::io::Error>
503 where
504         UL::Target: 'static + UtxoLookup,
505         CF::Target: 'static + chain::Filter,
506         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
507         T::Target: 'static + BroadcasterInterface,
508         ES::Target: 'static + EntropySource,
509         NS::Target: 'static + NodeSigner,
510         SP::Target: 'static + SignerProvider,
511         F::Target: 'static + FeeEstimator,
512         R::Target: 'static + Router,
513         L::Target: 'static + Logger,
514         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
515         CMH::Target: 'static + ChannelMessageHandler,
516         OMH::Target: 'static + OnionMessageHandler,
517         RMH::Target: 'static + RoutingMessageHandler,
518         UMH::Target: 'static + CustomMessageHandler,
519         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
520 {
521         let mut should_break = false;
522         let async_event_handler = |event| {
523                 let network_graph = gossip_sync.network_graph();
524                 let event_handler = &event_handler;
525                 let scorer = &scorer;
526                 async move {
527                         if let Some(network_graph) = network_graph {
528                                 handle_network_graph_update(network_graph, &event)
529                         }
530                         if let Some(ref scorer) = scorer {
531                                 update_scorer(scorer, &event);
532                         }
533                         event_handler(event).await;
534                 }
535         };
536         define_run_body!(persister,
537                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
538                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
539                 gossip_sync, peer_manager, logger, scorer, should_break, {
540                         let fut = Selector {
541                                 a: channel_manager.get_persistable_update_future(),
542                                 b: chain_monitor.get_update_future(),
543                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
544                         };
545                         match fut.await {
546                                 SelectorOutput::A => true,
547                                 SelectorOutput::B => false,
548                                 SelectorOutput::C(exit) => {
549                                         should_break = exit;
550                                         false
551                                 }
552                         }
553                 }, |t| sleeper(Duration::from_secs(t)),
554                 |fut: &mut SleepFuture, _| {
555                         let mut waker = dummy_waker();
556                         let mut ctx = task::Context::from_waker(&mut waker);
557                         match core::pin::Pin::new(fut).poll(&mut ctx) {
558                                 task::Poll::Ready(exit) => { should_break = exit; true },
559                                 task::Poll::Pending => false,
560                         }
561                 }, mobile_interruptable_platform)
562 }
563
564 #[cfg(feature = "std")]
565 impl BackgroundProcessor {
566         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
567         /// documentation].
568         ///
569         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
570         /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
571         /// either [`join`] or [`stop`].
572         ///
573         /// # Data Persistence
574         ///
575         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
576         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
577         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
578         /// provided implementation.
579         ///
580         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
581         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
582         /// See the `lightning-persister` crate for LDK's provided implementation.
583         ///
584         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
585         /// error or call [`join`] and handle any error that may arise. For the latter case,
586         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
587         ///
588         /// # Event Handling
589         ///
590         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
591         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
592         /// functionality implemented by other handlers.
593         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
594         ///
595         /// # Rapid Gossip Sync
596         ///
597         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
598         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
599         /// until the [`RapidGossipSync`] instance completes its first sync.
600         ///
601         /// [top-level documentation]: BackgroundProcessor
602         /// [`join`]: Self::join
603         /// [`stop`]: Self::stop
604         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
605         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
606         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
607         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
608         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
609         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
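        ///
        /// # Example
        ///
        /// A minimal sketch (ignored in doctests); `persister`, `event_handler`, `chain_monitor`,
        /// `channel_manager`, `peer_manager`, `logger`, and `scorer` are hypothetical,
        /// already-constructed values of the types this method expects:
        ///
        /// ```ignore
        /// let bg_processor = BackgroundProcessor::start(
        ///     persister, event_handler, chain_monitor, channel_manager,
        ///     GossipSync::none(), peer_manager, logger, Some(scorer),
        /// );
        /// // ... node runs ...
        /// // On shutdown, stop the background thread and surface any persistence error:
        /// bg_processor.stop().unwrap();
        /// ```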
610         pub fn start<
611                 'a,
612                 UL: 'static + Deref + Send + Sync,
613                 CF: 'static + Deref + Send + Sync,
614                 CW: 'static + Deref + Send + Sync,
615                 T: 'static + Deref + Send + Sync,
616                 ES: 'static + Deref + Send + Sync,
617                 NS: 'static + Deref + Send + Sync,
618                 SP: 'static + Deref + Send + Sync,
619                 F: 'static + Deref + Send + Sync,
620                 R: 'static + Deref + Send + Sync,
621                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
622                 L: 'static + Deref + Send + Sync,
623                 P: 'static + Deref + Send + Sync,
624                 Descriptor: 'static + SocketDescriptor + Send + Sync,
625                 CMH: 'static + Deref + Send + Sync,
626                 OMH: 'static + Deref + Send + Sync,
627                 RMH: 'static + Deref + Send + Sync,
628                 EH: 'static + EventHandler + Send,
629                 PS: 'static + Deref + Send,
630                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
631                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
632                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
633                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
634                 UMH: 'static + Deref + Send + Sync,
635                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
636                 S: 'static + Deref<Target = SC> + Send + Sync,
637                 SC: for <'b> WriteableScore<'b>,
638         >(
639                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
640                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
641         ) -> Self
642         where
643                 UL::Target: 'static + UtxoLookup,
644                 CF::Target: 'static + chain::Filter,
645                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
646                 T::Target: 'static + BroadcasterInterface,
647                 ES::Target: 'static + EntropySource,
648                 NS::Target: 'static + NodeSigner,
649                 SP::Target: 'static + SignerProvider,
650                 F::Target: 'static + FeeEstimator,
651                 R::Target: 'static + Router,
652                 L::Target: 'static + Logger,
653                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
654                 CMH::Target: 'static + ChannelMessageHandler,
655                 OMH::Target: 'static + OnionMessageHandler,
656                 RMH::Target: 'static + RoutingMessageHandler,
657                 UMH::Target: 'static + CustomMessageHandler,
658                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
659         {
660                 let stop_thread = Arc::new(AtomicBool::new(false));
661                 let stop_thread_clone = stop_thread.clone();
662                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
663                         let event_handler = |event| {
664                                 let network_graph = gossip_sync.network_graph();
665                                 if let Some(network_graph) = network_graph {
666                                         handle_network_graph_update(network_graph, &event)
667                                 }
668                                 if let Some(ref scorer) = scorer {
669                                         update_scorer(scorer, &event);
670                                 }
671                                 event_handler.handle_event(event);
672                         };
673                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
674                                 channel_manager, channel_manager.process_pending_events(&event_handler),
675                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
676                                 Sleeper::from_two_futures(
677                                         channel_manager.get_persistable_update_future(),
678                                         chain_monitor.get_update_future()
679                                 ).wait_timeout(Duration::from_millis(100)),
680                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
681                 });
682                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
683         }
684
685         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
686         /// [`ChannelManager`].
687         ///
688         /// # Panics
689         ///
690         /// This function panics if the background thread has panicked such as while persisting or
691         /// handling events.
692         ///
693         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
694         pub fn join(mut self) -> Result<(), std::io::Error> {
695                 assert!(self.thread_handle.is_some());
696                 self.join_thread()
697         }
698
699         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
700         /// [`ChannelManager`].
701         ///
702         /// # Panics
703         ///
704         /// This function panics if the background thread has panicked such as while persisting or
705         /// handling events.
706         ///
707         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
708         pub fn stop(mut self) -> Result<(), std::io::Error> {
709                 assert!(self.thread_handle.is_some());
710                 self.stop_and_join_thread()
711         }
712
713         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
714                 self.stop_thread.store(true, Ordering::Release);
715                 self.join_thread()
716         }
717
718         fn join_thread(&mut self) -> Result<(), std::io::Error> {
719                 match self.thread_handle.take() {
720                         Some(handle) => handle.join().unwrap(),
721                         None => Ok(()),
722                 }
723         }
724 }
725
726 #[cfg(feature = "std")]
727 impl Drop for BackgroundProcessor {
728         fn drop(&mut self) {
729                 self.stop_and_join_thread().unwrap();
730         }
731 }
732
733 #[cfg(all(feature = "std", test))]
734 mod tests {
735         use bitcoin::blockdata::block::BlockHeader;
736         use bitcoin::blockdata::constants::genesis_block;
737         use bitcoin::blockdata::locktime::PackedLockTime;
738         use bitcoin::blockdata::transaction::{Transaction, TxOut};
739         use bitcoin::network::constants::Network;
740         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
741         use lightning::chain::{BestBlock, Confirm, chainmonitor};
742         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
743         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
744         use lightning::chain::transaction::OutPoint;
745         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
746         use lightning::{get_event_msg, get_event};
747         use lightning::ln::PaymentHash;
748         use lightning::ln::channelmanager;
749         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
750         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
751         use lightning::ln::msgs::{ChannelMessageHandler, Init};
752         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
753         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
754         use lightning::routing::router::{DefaultRouter, RouteHop};
755         use lightning::routing::scoring::{ChannelUsage, Score};
756         use lightning::util::config::UserConfig;
757         use lightning::util::ser::Writeable;
758         use lightning::util::test_utils;
759         use lightning::util::persist::KVStorePersister;
760         use lightning_persister::FilesystemPersister;
761         use std::collections::VecDeque;
762         use std::fs;
763         use std::path::PathBuf;
764         use std::sync::{Arc, Mutex};
765         use std::sync::mpsc::SyncSender;
766         use std::time::Duration;
767         use bitcoin::hashes::Hash;
768         use bitcoin::TxMerkleNode;
769         use lightning_rapid_gossip_sync::RapidGossipSync;
770         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
771
772         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
773
774         #[derive(Clone, Hash, PartialEq, Eq)]
775         struct TestDescriptor{}
776         impl SocketDescriptor for TestDescriptor {
777                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
778                         0
779                 }
780
781                 fn disconnect_socket(&mut self) {}
782         }
783
784         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
785
786         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
787
788         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
789         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
790
791         struct Node {
792                 node: Arc<ChannelManager>,
793                 p2p_gossip_sync: PGS,
794                 rapid_gossip_sync: RGS,
795                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
796                 chain_monitor: Arc<ChainMonitor>,
797                 persister: Arc<FilesystemPersister>,
798                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
799                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
800                 logger: Arc<test_utils::TestLogger>,
801                 best_block: BestBlock,
802                 scorer: Arc<Mutex<TestScorer>>,
803         }
804
805         impl Node {
806                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
807                         GossipSync::P2P(self.p2p_gossip_sync.clone())
808                 }
809
810                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
811                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
812                 }
813
814                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
815                         GossipSync::None
816                 }
817         }
818
819         impl Drop for Node {
820                 fn drop(&mut self) {
821                         let data_dir = self.persister.get_data_dir();
822                         match fs::remove_dir_all(data_dir.clone()) {
823                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
824                                 _ => {}
825                         }
826                 }
827         }
828
829         struct Persister {
830                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
831                 graph_persistence_notifier: Option<SyncSender<()>>,
832                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
833                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
834                 filesystem_persister: FilesystemPersister,
835         }
836
837         impl Persister {
838                 fn new(data_dir: String) -> Self {
839                         let filesystem_persister = FilesystemPersister::new(data_dir);
840                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
841                 }
842
843                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
844                         Self { graph_error: Some((error, message)), ..self }
845                 }
846
847                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
848                         Self { graph_persistence_notifier: Some(sender), ..self }
849                 }
850
851                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
852                         Self { manager_error: Some((error, message)), ..self }
853                 }
854
855                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
856                         Self { scorer_error: Some((error, message)), ..self }
857                 }
858         }
859
860         impl KVStorePersister for Persister {
861                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
862                         if key == "manager" {
863                                 if let Some((error, message)) = self.manager_error {
864                                         return Err(std::io::Error::new(error, message))
865                                 }
866                         }
867
868                         if key == "network_graph" {
869                                 if let Some(sender) = &self.graph_persistence_notifier {
870                                         sender.send(()).unwrap();
871                                 };
872
873                                 if let Some((error, message)) = self.graph_error {
874                                         return Err(std::io::Error::new(error, message))
875                                 }
876                         }
877
878                         if key == "scorer" {
879                                 if let Some((error, message)) = self.scorer_error {
880                                         return Err(std::io::Error::new(error, message))
881                                 }
882                         }
883
884                         self.filesystem_persister.persist(key, object)
885                 }
886         }
887
888         struct TestScorer {
889                 event_expectations: Option<VecDeque<TestResult>>,
890         }
891
892         #[derive(Debug)]
893         enum TestResult {
894                 PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
895                 PaymentSuccess { path: Vec<RouteHop> },
896                 ProbeFailure { path: Vec<RouteHop> },
897                 ProbeSuccess { path: Vec<RouteHop> },
898         }
899
900         impl TestScorer {
901                 fn new() -> Self {
902                         Self { event_expectations: None }
903                 }
904
905                 fn expect(&mut self, expectation: TestResult) {
906                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
907                 }
908         }
909
910         impl lightning::util::ser::Writeable for TestScorer {
911                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
912         }
913
914         impl Score for TestScorer {
915                 fn channel_penalty_msat(
916                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
917                 ) -> u64 { unimplemented!(); }
918
919                 fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
920                         if let Some(expectations) = &mut self.event_expectations {
921                                 match expectations.pop_front().unwrap() {
922                                         TestResult::PaymentFailure { path, short_channel_id } => {
923                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
924                                                 assert_eq!(actual_short_channel_id, short_channel_id);
925                                         },
926                                         TestResult::PaymentSuccess { path } => {
927                                                 panic!("Unexpected successful payment path: {:?}", path)
928                                         },
929                                         TestResult::ProbeFailure { path } => {
930                                                 panic!("Unexpected probe failure: {:?}", path)
931                                         },
932                                         TestResult::ProbeSuccess { path } => {
933                                                 panic!("Unexpected probe success: {:?}", path)
934                                         }
935                                 }
936                         }
937                 }
938
939                 fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
940                         if let Some(expectations) = &mut self.event_expectations {
941                                 match expectations.pop_front().unwrap() {
942                                         TestResult::PaymentFailure { path, .. } => {
943                                                 panic!("Unexpected payment path failure: {:?}", path)
944                                         },
945                                         TestResult::PaymentSuccess { path } => {
946                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
947                                         },
948                                         TestResult::ProbeFailure { path } => {
949                                                 panic!("Unexpected probe failure: {:?}", path)
950                                         },
951                                         TestResult::ProbeSuccess { path } => {
952                                                 panic!("Unexpected probe success: {:?}", path)
953                                         }
954                                 }
955                         }
956                 }
957
958                 fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
959                         if let Some(expectations) = &mut self.event_expectations {
960                                 match expectations.pop_front().unwrap() {
961                                         TestResult::PaymentFailure { path, .. } => {
962                                                 panic!("Unexpected payment path failure: {:?}", path)
963                                         },
964                                         TestResult::PaymentSuccess { path } => {
965                                                 panic!("Unexpected payment path success: {:?}", path)
966                                         },
967                                         TestResult::ProbeFailure { path } => {
968                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
969                                         },
970                                         TestResult::ProbeSuccess { path } => {
971                                                 panic!("Unexpected probe success: {:?}", path)
972                                         }
973                                 }
974                         }
975                 }
976                 fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
977                         if let Some(expectations) = &mut self.event_expectations {
978                                 match expectations.pop_front().unwrap() {
979                                         TestResult::PaymentFailure { path, .. } => {
980                                                 panic!("Unexpected payment path failure: {:?}", path)
981                                         },
982                                         TestResult::PaymentSuccess { path } => {
983                                                 panic!("Unexpected payment path success: {:?}", path)
984                                         },
985                                         TestResult::ProbeFailure { path } => {
986                                                 panic!("Unexpected probe failure: {:?}", path)
987                                         },
988                                         TestResult::ProbeSuccess { path } => {
989                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
990                                         }
991                                 }
992                         }
993                 }
994         }
995
996         impl Drop for TestScorer {
997                 fn drop(&mut self) {
998                         if std::thread::panicking() {
999                                 return;
1000                         }
1001
1002                         if let Some(event_expectations) = &self.event_expectations {
1003                                 if !event_expectations.is_empty() {
1004                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1005                                 }
1006                         }
1007                 }
1008         }
1009
1010         fn get_full_filepath(filepath: String, filename: String) -> String {
1011                 let mut path = PathBuf::from(filepath);
1012                 path.push(filename);
1013                 path.to_str().unwrap().to_string()
1014         }
1015
1016         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1017                 let mut nodes = Vec::new();
1018                 for i in 0..num_nodes {
1019                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
1020                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1021                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1022                         let network = Network::Testnet;
1023                         let genesis_block = genesis_block(network);
1024                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1025                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1026                         let seed = [i as u8; 32];
1027                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1028                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1029                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1030                         let now = Duration::from_secs(genesis_block.header.time as u64);
1031                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1032                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1033                         let best_block = BestBlock::from_network(network);
1034                         let params = ChainParameters { network, best_block };
1035                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1036                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1037                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1038                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1039                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1040                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1041                         nodes.push(node);
1042                 }
1043
1044                 for i in 0..num_nodes {
1045                         for j in (i+1)..num_nodes {
1046                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1047                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1048                         }
1049                 }
1050
1051                 nodes
1052         }
1053
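             // `open_channel!` drives a full channel open between two nodes: `begin_open_channel!` exchanges
             // the open/accept messages, `handle_funding_generation_ready!` builds the funding transaction,
             // and `end_open_channel!` completes the funding flow.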
1054         macro_rules! open_channel {
1055                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1056                         begin_open_channel!($node_a, $node_b, $channel_value);
1057                         let events = $node_a.node.get_and_clear_pending_events();
1058                         assert_eq!(events.len(), 1);
1059                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1060                         end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
1061                         tx
1062                 }}
1063         }
1064
1065         macro_rules! begin_open_channel {
1066                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1067                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1068                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1069                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1070                 }}
1071         }
1072
1073         macro_rules! handle_funding_generation_ready {
1074                 ($event: expr, $channel_value: expr) => {{
1075                         match $event {
1076                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1077                                         assert_eq!(channel_value_satoshis, $channel_value);
1078                                         assert_eq!(user_channel_id, 42);
1079
1080                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1081                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1082                                         }]};
1083                                         (temporary_channel_id, tx)
1084                                 },
1085                                 _ => panic!("Unexpected event"),
1086                         }
1087                 }}
1088         }
1089
1090         macro_rules! end_open_channel {
1091                 ($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
1092                         $node_a.node.funding_transaction_generated(&$temporary_channel_id, &$node_b.node.get_our_node_id(), $tx.clone()).unwrap();
1093                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1094                         get_event!($node_b, Event::ChannelPending);
1095
1096                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1097                         get_event!($node_a, Event::ChannelPending);
1098                 }}
1099         }
1100
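             // Connects `depth` blocks on top of the node's current best block, confirming `tx` in the first
             // block and notifying the ChannelManager and ChainMonitor of the final best block.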
1101         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1102                 for i in 1..=depth {
1103                         let prev_blockhash = node.best_block.block_hash();
1104                         let height = node.best_block.height() + 1;
1105                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1106                         let txdata = vec![(0, tx)];
1107                         node.best_block = BestBlock::new(header.block_hash(), height);
1108                         match i {
1109                                 1 => {
1110                                         node.node.transactions_confirmed(&header, &txdata, height);
1111                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1112                                 },
1113                                 x if x == depth => {
1114                                         node.node.best_block_updated(&header, height);
1115                                         node.chain_monitor.best_block_updated(&header, height);
1116                                 },
1117                                 _ => {},
1118                         }
1119                 }
1120         }
1121         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1122                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1123         }
1124
1125         #[test]
1126         fn test_background_processor() {
1127                 // Test that when a new channel is created the ChannelManager is re-persisted with the new
1128                 // state, and that whenever further updates are available the manager signals that it needs
1129                 // re-persistence and is then successfully re-persisted.
1130                 let nodes = create_nodes(2, "test_background_processor".to_string());
1131
1132                 // Go through the channel creation process so that each node has something to persist. Since
1133                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1134                 // avoid a race with processing events.
1135                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1136
1137                 // Start a background processor to watch the first node.
1138                 let data_dir = nodes[0].persister.get_data_dir();
1139                 let persister = Arc::new(Persister::new(data_dir));
1140                 let event_handler = |_: _| {};
1141                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1142
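                     // `check_persisted_data!` serializes the given object and busy-waits until the file at
                     // `$filepath` contains the same bytes, since the BackgroundProcessor persists on its own
                     // thread and we can't know exactly when the write lands.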
1143                 macro_rules! check_persisted_data {
1144                         ($node: expr, $filepath: expr) => {
1145                                 let mut expected_bytes = Vec::new();
1146                                 loop {
1147                                         expected_bytes.clear();
1148                                         match $node.write(&mut expected_bytes) {
1149                                                 Ok(()) => {
1150                                                         match std::fs::read($filepath) {
1151                                                                 Ok(bytes) => {
1152                                                                         if bytes == expected_bytes {
1153                                                                                 break
1154                                                                         } else {
1155                                                                                 continue
1156                                                                         }
1157                                                                 },
1158                                                                 Err(_) => continue
1159                                                         }
1160                                                 },
1161                                                 Err(e) => panic!("Unexpected error: {}", e)
1162                                         }
1163                                 }
1164                         }
1165                 }
1166
1167                 // Check that the initial channel manager data is persisted as expected.
1168                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1169                 check_persisted_data!(nodes[0].node, filepath.clone());
1170
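                     // Wait until the manager no longer signals that it needs re-persistence before making
                     // further changes, so the next check observes a fresh write.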
1171                 loop {
1172                         if !nodes[0].node.get_persistence_condvar_value() { break }
1173                 }
1174
1175                 // Force-close the channel.
1176                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1177
1178                 // Check that the force-close updates are persisted.
1179                 check_persisted_data!(nodes[0].node, filepath.clone());
1180                 loop {
1181                         if !nodes[0].node.get_persistence_condvar_value() { break }
1182                 }
1183
1184                 // Check that the network graph is persisted.
1185                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1186                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1187
1188                 // Check that the scorer is persisted.
1189                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1190                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1191
1192                 if !std::thread::panicking() {
1193                         bg_processor.stop().unwrap();
1194                 }
1195         }
1196
1197         #[test]
1198         fn test_timer_tick_called() {
1199                 // Test that `ChannelManager::timer_tick_occurred` and `PeerManager::timer_tick_occurred` are
1200                 // called every `FRESHNESS_TIMER`.
1201                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1202                 let data_dir = nodes[0].persister.get_data_dir();
1203                 let persister = Arc::new(Persister::new(data_dir));
1204                 let event_handler = |_: _| {};
1205                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1206                 loop {
1207                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1208                         let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
1209                         let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
1210                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
1211                                         log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
1212                                 break
1213                         }
1214                 }
1215
1216                 if !std::thread::panicking() {
1217                         bg_processor.stop().unwrap();
1218                 }
1219         }
1220
1221         #[test]
1222         fn test_channel_manager_persist_error() {
1223                 // Test that if we encounter an error during manager persistence, the error surfaces when the
1223                 // background thread is joined.
1224                 let nodes = create_nodes(2, "test_persist_error".to_string());
1225                 open_channel!(nodes[0], nodes[1], 100000);
1226
1227                 let data_dir = nodes[0].persister.get_data_dir();
1228                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1229                 let event_handler = |_: _| {};
1230                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1231                 match bg_processor.join() {
1232                         Ok(_) => panic!("Expected error persisting manager"),
1233                         Err(e) => {
1234                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1235                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1236                         },
1237                 }
1238         }
1239
1240         #[test]
1241         fn test_network_graph_persist_error() {
1242                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1243                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1244                 let data_dir = nodes[0].persister.get_data_dir();
1245                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1246                 let event_handler = |_: _| {};
1247                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1248
1249                 match bg_processor.stop() {
1250                         Ok(_) => panic!("Expected error persisting network graph"),
1251                         Err(e) => {
1252                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1253                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1254                         },
1255                 }
1256         }
1257
1258         #[test]
1259         fn test_scorer_persist_error() {
1260                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1261                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1262                 let data_dir = nodes[0].persister.get_data_dir();
1263                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1264                 let event_handler = |_: _| {};
1265                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1266
1267                 match bg_processor.stop() {
1268                         Ok(_) => panic!("Expected error persisting scorer"),
1269                         Err(e) => {
1270                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1271                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1272                         },
1273                 }
1274         }
1275
1276         #[test]
1277         fn test_background_event_handling() {
1278                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1279                 let channel_value = 100000;
1280                 let data_dir = nodes[0].persister.get_data_dir();
1281                 let persister = Arc::new(Persister::new(data_dir.clone()));
1282
1283                 // Set up a background event handler for FundingGenerationReady events.
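                     // The handler runs on the BackgroundProcessor's own thread, so forward the generated
                     // funding parameters back to the test thread over a bounded channel.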
1284                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1285                 let event_handler = move |event: Event| match event {
1286                         Event::FundingGenerationReady { .. } => sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1287                         Event::ChannelReady { .. } => {},
1288                         _ => panic!("Unexpected event: {:?}", event),
1289                 };
1290
1291                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1292
1293                 // Open a channel and check that the FundingGenerationReady event was handled.
1294                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1295                 let (temporary_channel_id, funding_tx) = receiver
1296                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1297                         .expect("FundingGenerationReady not handled within deadline");
1298                 end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);
1299
1300                 // Confirm the funding transaction.
1301                 confirm_transaction(&mut nodes[0], &funding_tx);
1302                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1303                 confirm_transaction(&mut nodes[1], &funding_tx);
1304                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1305                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1306                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1307                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1308                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1309
1310                 if !std::thread::panicking() {
1311                         bg_processor.stop().unwrap();
1312                 }
1313
1314                 // Set up a background event handler for SpendableOutputs events.
1315                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1316                 let event_handler = move |event: Event| match event {
1317                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1318                         Event::ChannelReady { .. } => {},
1319                         Event::ChannelClosed { .. } => {},
1320                         _ => panic!("Unexpected event: {:?}", event),
1321                 };
1322                 let persister = Arc::new(Persister::new(data_dir));
1323                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1324
1325                 // Force close the channel and check that the SpendableOutputs event was handled.
1326                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1327                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
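                     // Confirm the commitment transaction deeply enough that our to_self output becomes
                     // spendable, which should surface as a SpendableOutputs event.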
1328                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1329
1330                 let event = receiver
1331                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1332                         .expect("Events not handled within deadline");
1333                 match event {
1334                         Event::SpendableOutputs { .. } => {},
1335                         _ => panic!("Unexpected event: {:?}", event),
1336                 }
1337
1338                 if !std::thread::panicking() {
1339                         bg_processor.stop().unwrap();
1340                 }
1341         }
1342
1343         #[test]
1344         fn test_scorer_persistence() {
1345                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1346                 let data_dir = nodes[0].persister.get_data_dir();
1347                 let persister = Arc::new(Persister::new(data_dir));
1348                 let event_handler = |_: _| {};
1349                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1350
1351                 loop {
1352                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1353                         let expected_log = "Persisting scorer".to_string();
1354                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1355                                 break
1356                         }
1357                 }
1358
1359                 if !std::thread::panicking() {
1360                         bg_processor.stop().unwrap();
1361                 }
1362         }
1363
1364         #[test]
1365         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1366                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1367                 let data_dir = nodes[0].persister.get_data_dir();
1368                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1369                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1370                 let network_graph = nodes[0].network_graph.clone();
1371                 let features = ChannelFeatures::empty();
1372                 network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
1373                         .expect("Failed to update channel from partial announcement");
1374                 let original_graph_description = network_graph.to_string();
1375                 assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1376                 assert_eq!(network_graph.read_only().channels().len(), 1);
1377
1378                 let event_handler = |_: _| {};
1379                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1380
1381                 loop {
1382                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1383                         let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1384                         if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1385                                 .unwrap_or(&0) > 1
1386                         {
1387                                 // Wait until the loop has gone around at least twice.
1388                                 break
1389                         }
1390                 }
1391
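                     // A raw rapid-gossip-sync snapshot (note the leading `LDK` magic bytes 76, 68, 75) which
                     // announces two channels; applying it below also marks the initial graph sync as complete.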
1392                 let initialization_input = vec![
1393                         76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1394                         79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1395                         0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1396                         187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1397                         157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1398                         88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1399                         204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1400                         181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1401                         110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1402                         76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1403                         226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1404                         0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1405                         0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1406                 ];
1407                 nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1408
1409                 // The snapshot should have added two channels on top of the one created above.
1410                 assert_eq!(network_graph.read_only().channels().len(), 3);
1411
1412                 receiver
1413                         .recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
1414                         .expect("Network graph not pruned within deadline");
1415
1416                 background_processor.stop().unwrap();
1417
1418                 // All channels should now have been pruned, since the graph sync has completed.
1419                 assert_eq!(network_graph.read_only().channels().len(), 0);
1420         }
1421
1422         #[test]
1423         fn test_payment_path_scoring() {
1424                 // Ensure that the scorer is updated when the relevant events are processed: payment path
1425                 // failures and successes as well as probe failures and successes (note that in practice the
1426                 // channel must be public or else we won't score it).
1427                 // Set up a background event handler for the payment path and probe result events below.
1428                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1429                 let event_handler = move |event: Event| match event {
1430                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1431                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1432                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1433                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1434                         _ => panic!("Unexpected event: {:?}", event),
1435                 };
1436
1437                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1438                 let data_dir = nodes[0].persister.get_data_dir();
1439                 let persister = Arc::new(Persister::new(data_dir));
1440                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1441
1442                 let scored_scid = 4242;
1443                 let secp_ctx = Secp256k1::new();
1444                 let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1445                 let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1446
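                     // Build a synthetic single-hop path over `scored_scid` to drive the scorer expectations below.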
1447                 let path = vec![RouteHop {
1448                         pubkey: node_1_id,
1449                         node_features: NodeFeatures::empty(),
1450                         short_channel_id: scored_scid,
1451                         channel_features: ChannelFeatures::empty(),
1452                         fee_msat: 0,
1453                         cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1454                 }];
1455
1456                 nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1457                 nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1458                         payment_id: None,
1459                         payment_hash: PaymentHash([42; 32]),
1460                         payment_failed_permanently: false,
1461                         failure: PathFailure::OnPath { network_update: None },
1462                         path: path.clone(),
1463                         short_channel_id: Some(scored_scid),
1464                 });
1465                 let event = receiver
1466                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1467                         .expect("PaymentPathFailed not handled within deadline");
1468                 match event {
1469                         Event::PaymentPathFailed { .. } => {},
1470                         _ => panic!("Unexpected event"),
1471                 }
1472
1473                 // A payment that the destination explicitly failed back still proves the path reached it, so
1474                 // ensure such failures are scored as ProbeSuccess.
1475                 nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1476                 nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1477                         payment_id: None,
1478                         payment_hash: PaymentHash([42; 32]),
1479                         payment_failed_permanently: true,
1480                         failure: PathFailure::OnPath { network_update: None },
1481                         path: path.clone(),
1482                         short_channel_id: None,
1483                 });
1484                 let event = receiver
1485                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1486                         .expect("PaymentPathFailed not handled within deadline");
1487                 match event {
1488                         Event::PaymentPathFailed { .. } => {},
1489                         _ => panic!("Unexpected event"),
1490                 }
1491
1492                 nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1493                 nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1494                         payment_id: PaymentId([42; 32]),
1495                         payment_hash: None,
1496                         path: path.clone(),
1497                 });
1498                 let event = receiver
1499                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1500                         .expect("PaymentPathSuccessful not handled within deadline");
1501                 match event {
1502                         Event::PaymentPathSuccessful { .. } => {},
1503                         _ => panic!("Unexpected event"),
1504                 }
1505
1506                 nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1507                 nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1508                         payment_id: PaymentId([42; 32]),
1509                         payment_hash: PaymentHash([42; 32]),
1510                         path: path.clone(),
1511                 });
1512                 let event = receiver
1513                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1514                         .expect("ProbeSuccessful not handled within deadline");
1515                 match event {
1516                         Event::ProbeSuccessful { .. } => {},
1517                         _ => panic!("Unexpected event"),
1518                 }
1519
1520                 nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1521                 nodes[0].node.push_pending_event(Event::ProbeFailed {
1522                         payment_id: PaymentId([42; 32]),
1523                         payment_hash: PaymentHash([42; 32]),
1524                         path,
1525                         short_channel_id: Some(scored_scid),
1526                 });
1527                 let event = receiver
1528                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1529                         .expect("ProbeFailure not handled within deadline");
1530                 match event {
1531                         Event::ProbeFailed { .. } => {},
1532                         _ => panic!("Unexpected event"),
1533                 }
1534
1535                 if !std::thread::panicking() {
1536                         bg_processor.stop().unwrap();
1537                 }
1538         }
1539 }