Merge pull request #2221 from TheBlueMatt/2023-04-bp-exit-fast
rust-lightning / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
68 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
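// A minimal sketch (hypothetical application type, not part of this crate): per the
// `must_use` note above, the processor should be stored for as long as background processing
// should run, and stopped (or joined) explicitly at shutdown so any persistence error it hit
// is surfaced rather than silently dropped.
#[cfg(feature = "std")]
#[allow(dead_code)]
struct NodeHandleSketch {
        // Dropping this field stops the background thread, so it is held until shutdown.
        background_processor: Option<BackgroundProcessor>,
}

#[cfg(feature = "std")]
#[allow(dead_code)]
impl NodeHandleSketch {
        fn shutdown(&mut self) -> Result<(), std::io::Error> {
                match self.background_processor.take() {
                        // `stop` signals the background thread to exit and returns any error it hit
                        // while persisting the `ChannelManager`, scorer, or network graph.
                        Some(bp) => bp.stop(),
                        None => Ok(()),
                }
        }
}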
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take a lot longer without compiler optimisations.
99 /// Increasing the ping timer allows for this, but slower devices will be disconnected if the
100 /// timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(not(test))]
120 const REBROADCAST_TIMER: u64 = 30;
121 #[cfg(test)]
122 const REBROADCAST_TIMER: u64 = 1;
123
124 #[cfg(feature = "futures")]
125 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
126 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
127 #[cfg(feature = "futures")]
128 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
129         min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
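// A small sketch (test-only): `FASTEST_TIMER` should equal the runtime minimum of the timers
// it is built from, i.e. `min_u64` behaves exactly like `core::cmp::min`.
#[cfg(all(test, feature = "futures"))]
#[test]
fn fastest_timer_is_the_minimum_timer() {
        let timers = [FRESHNESS_TIMER, PING_TIMER, SCORER_PERSIST_TIMER,
                FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER];
        // In test builds all of these constants are 1, so the minimum is trivially 1 as well.
        assert_eq!(FASTEST_TIMER, timers.iter().copied().min().unwrap());
}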
130
131 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
132 pub enum GossipSync<
133         P: Deref<Target = P2PGossipSync<G, U, L>>,
134         R: Deref<Target = RapidGossipSync<G, L>>,
135         G: Deref<Target = NetworkGraph<L>>,
136         U: Deref,
137         L: Deref,
138 >
139 where U::Target: UtxoLookup, L::Target: Logger {
140         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
141         P2P(P),
142         /// Rapid gossip sync from a trusted server.
143         Rapid(R),
144         /// No gossip sync.
145         None,
146 }
147
148 impl<
149         P: Deref<Target = P2PGossipSync<G, U, L>>,
150         R: Deref<Target = RapidGossipSync<G, L>>,
151         G: Deref<Target = NetworkGraph<L>>,
152         U: Deref,
153         L: Deref,
154 > GossipSync<P, R, G, U, L>
155 where U::Target: UtxoLookup, L::Target: Logger {
156         fn network_graph(&self) -> Option<&G> {
157                 match self {
158                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
159                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::None => None,
161                 }
162         }
163
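        // Returns the network graph only once it is safe to prune it: always for P2P gossip, but
        // for rapid gossip sync only after the initial sync has completed, since pruning earlier
        // could remove channels the sync is still going to apply.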
164         fn prunable_network_graph(&self) -> Option<&G> {
165                 match self {
166                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
167                         GossipSync::Rapid(gossip_sync) => {
168                                 if gossip_sync.is_initial_sync_complete() {
169                                         Some(gossip_sync.network_graph())
170                                 } else {
171                                         None
172                                 }
173                         },
174                         GossipSync::None => None,
175                 }
176         }
177 }
178
179 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
180 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
181         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
182 where
183         U::Target: UtxoLookup,
184         L::Target: Logger,
185 {
186         /// Initializes a new [`GossipSync::P2P`] variant.
187         pub fn p2p(gossip_sync: P) -> Self {
188                 GossipSync::P2P(gossip_sync)
189         }
190 }
191
192 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
193 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
194         GossipSync<
195                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
196                 R,
197                 G,
198                 &'a (dyn UtxoLookup + Send + Sync),
199                 L,
200         >
201 where
202         L::Target: Logger,
203 {
204         /// Initializes a new [`GossipSync::Rapid`] variant.
205         pub fn rapid(gossip_sync: R) -> Self {
206                 GossipSync::Rapid(gossip_sync)
207         }
208 }
209
210 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
211 impl<'a, L: Deref>
212         GossipSync<
213                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
214                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
215                 &'a NetworkGraph<L>,
216                 &'a (dyn UtxoLookup + Send + Sync),
217                 L,
218         >
219 where
220         L::Target: Logger,
221 {
222         /// Initializes a new [`GossipSync::None`] variant.
223         pub fn none() -> Self {
224                 GossipSync::None
225         }
226 }
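// A minimal sketch (hypothetical helper, not part of this crate): picking a `GossipSync`
// variant from whichever sync objects the application actually constructed, mirroring the
// decision callers make before handing a `GossipSync` to `BackgroundProcessor::start` or
// `process_events_async`.
#[allow(dead_code)]
fn choose_gossip_sync_sketch<
        P: Deref<Target = P2PGossipSync<G, U, L>>,
        R: Deref<Target = RapidGossipSync<G, L>>,
        G: Deref<Target = NetworkGraph<L>>,
        U: Deref,
        L: Deref,
>(p2p: Option<P>, rapid: Option<R>) -> GossipSync<P, R, G, U, L>
where U::Target: UtxoLookup, L::Target: Logger {
        match (p2p, rapid) {
                // Prefer full P2P gossip when a peer-backed sync exists.
                (Some(p2p), _) => GossipSync::P2P(p2p),
                // Otherwise fall back to rapid gossip sync from a trusted server, if any.
                (None, Some(rapid)) => GossipSync::Rapid(rapid),
                // With neither, the processor simply skips network graph pruning and persistence.
                (None, None) => GossipSync::None,
        }
}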
227
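// Applies any `NetworkUpdate` carried in a `PaymentPathFailed` event to the network graph, so
// channel failures and closures learned during payment attempts inform future routing.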
228 fn handle_network_graph_update<L: Deref>(
229         network_graph: &NetworkGraph<L>, event: &Event
230 ) where L::Target: Logger {
231         if let Event::PaymentPathFailed {
232                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
233         {
234                 network_graph.handle_network_update(upd);
235         }
236 }
237
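// Feeds payment and probe outcomes from events into the user-provided scorer so subsequent
// route-finding can penalize or favor the channels involved.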
238 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
239         scorer: &'a S, event: &Event
240 ) {
241         let mut score = scorer.lock();
242         match event {
243                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
244                         score.payment_path_failed(path, *scid);
245                 },
246                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
247                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
248                         // because the payment made it all the way to the destination with sufficient liquidity.
249                         score.probe_successful(path);
250                 },
251                 Event::PaymentPathSuccessful { path, .. } => {
252                         score.payment_path_successful(path);
253                 },
254                 Event::ProbeSuccessful { path, .. } => {
255                         score.probe_successful(path);
256                 },
257                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
258                         score.probe_failed(path, *scid);
259                 },
260                 _ => {},
261         }
262 }
263
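// `define_run_body!` expands into the main processing loop shared by the blocking
// (`BackgroundProcessor::start`) and async (`process_events_async`) entry points. The caller
// supplies how to wait for wakeups (`$await`), how to measure elapsed time (`$get_timer` /
// `$timer_elapsed`), whether to watch for unexpectedly long sleeps (`$check_slow_await`), and
// how to detect a shutdown request (`$loop_exit_check`), so the same body runs on either
// `std` threads or executor-driven futures.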
264 macro_rules! define_run_body {
265         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
266          $channel_manager: ident, $process_channel_manager_events: expr,
267          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
268          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
269          $check_slow_await: expr)
270         => { {
271                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
272                 $channel_manager.timer_tick_occurred();
273                 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
274                 $chain_monitor.rebroadcast_pending_claims();
275
276                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
277                 let mut last_ping_call = $get_timer(PING_TIMER);
278                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
279                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
280                 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
281                 let mut have_pruned = false;
282
283                 loop {
284                         $process_channel_manager_events;
285                         $process_chain_monitor_events;
286
287                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
288                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
289                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
290                         // without running the normal event processing above and handing events to users.
291                         //
292                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
293                         // processing a message effectively at any point during this loop. In order to
294                         // minimize the time between such processing completing and persisting the updated
295                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
296                         // generally, and as a fallback place such blocking only immediately before
297                         // persistence.
298                         $peer_manager.process_events();
299
300                         // Exit the loop if the background processor was requested to stop.
301                         if $loop_exit_check {
302                                 log_trace!($logger, "Terminating background processor.");
303                                 break;
304                         }
305
306                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
307                         // see `await_start`'s use below.
308                         let mut await_start = None;
309                         if $check_slow_await { await_start = Some($get_timer(1)); }
310                         let updates_available = $await;
311                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
312
313                         // Exit the loop if the background processor was requested to stop.
314                         if $loop_exit_check {
315                                 log_trace!($logger, "Terminating background processor.");
316                                 break;
317                         }
318
319                         if updates_available {
320                                 log_trace!($logger, "Persisting ChannelManager...");
321                                 $persister.persist_manager(&*$channel_manager)?;
322                                 log_trace!($logger, "Done persisting ChannelManager.");
323                         }
324                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
325                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
326                                 $channel_manager.timer_tick_occurred();
327                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
328                         }
329                         if await_slow {
330                                 // On various platforms, we may be starved of CPU cycles for several reasons.
331                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
332                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
333                                 // may not get any cycles.
334                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
335                                 // full second, at which point we assume sockets may have been killed (they
336                                 // appear to be at least on some platforms, even if it has only been a second).
337                                 // Note that we have to take care to not get here just because user event
338                                 // processing was slow at the top of the loop. For example, the sample client
339                                 // may call Bitcoin Core RPCs during event handling, which very often takes
340                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
341                                 // peers.
342                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
343                                 $peer_manager.disconnect_all_peers();
344                                 last_ping_call = $get_timer(PING_TIMER);
345                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
346                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
347                                 $peer_manager.timer_tick_occurred();
348                                 last_ping_call = $get_timer(PING_TIMER);
349                         }
350
351                         // Note that we want to run a graph prune once not long after startup before
352                         // falling back to our usual hourly prunes. This avoids short-lived clients never
353                         // pruning their network graph. We run once 60 seconds after startup before
354                         // continuing our normal cadence.
355                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
356                         if $timer_elapsed(&mut last_prune_call, prune_timer) {
357                                 // The network graph must not be pruned while rapid sync completion is pending
358                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
359                                         #[cfg(feature = "std")] {
360                                                 log_trace!($logger, "Pruning and persisting network graph.");
361                                                 network_graph.remove_stale_channels_and_tracking();
362                                         }
363                                         #[cfg(not(feature = "std"))] {
364                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
365                                                 log_trace!($logger, "Persisting network graph.");
366                                         }
367
368                                         if let Err(e) = $persister.persist_graph(network_graph) {
369                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
370                                         }
371
372                                         have_pruned = true;
373                                 }
374                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
375                                 last_prune_call = $get_timer(prune_timer);
376                         }
377
378                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
379                                 if let Some(ref scorer) = $scorer {
380                                         log_trace!($logger, "Persisting scorer");
381                                         if let Err(e) = $persister.persist_scorer(&scorer) {
382                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
383                                         }
384                                 }
385                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
386                         }
387
388                         if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
389                                 log_trace!($logger, "Rebroadcasting monitor's pending claims");
390                                 $chain_monitor.rebroadcast_pending_claims();
391                                 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
392                         }
393                 }
394
395                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
396                 // some races where users quit while channel updates were in-flight, with
397                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
398                 $persister.persist_manager(&*$channel_manager)?;
399
400                 // Persist Scorer on exit
401                 if let Some(ref scorer) = $scorer {
402                         $persister.persist_scorer(&scorer)?;
403                 }
404
405                 // Persist NetworkGraph on exit
406                 if let Some(network_graph) = $gossip_sync.network_graph() {
407                         $persister.persist_graph(network_graph)?;
408                 }
409
410                 Ok(())
411         } }
412 }
413
414 #[cfg(feature = "futures")]
415 pub(crate) mod futures_util {
416         use core::future::Future;
417         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
418         use core::pin::Pin;
419         use core::marker::Unpin;
420         pub(crate) struct Selector<
421                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
422         > {
423                 pub a: A,
424                 pub b: B,
425                 pub c: C,
426         }
427         pub(crate) enum SelectorOutput {
428                 A, B, C(bool),
429         }
430
431         impl<
432                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
433         > Future for Selector<A, B, C> {
434                 type Output = SelectorOutput;
435                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
436                         match Pin::new(&mut self.a).poll(ctx) {
437                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
438                                 Poll::Pending => {},
439                         }
440                         match Pin::new(&mut self.b).poll(ctx) {
441                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
442                                 Poll::Pending => {},
443                         }
444                         match Pin::new(&mut self.c).poll(ctx) {
445                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
446                                 Poll::Pending => {},
447                         }
448                         Poll::Pending
449                 }
450         }
451
452         // If we want to poll a future without an async context, to figure out whether it has
453         // completed without awaiting it, we need a Waker, which needs a vtable. We fill it with
454         // dummy values, but sadly there's a good bit of boilerplate here.
455         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
456         fn dummy_waker_action(_: *const ()) { }
457
458         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
459                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
460         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
461 }
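// A small sketch (test-only): `dummy_waker` lets us poll a future exactly once outside an
// async context to check whether it has already completed, which is how the async processor
// below detects an elapsed sleep without awaiting it.
#[cfg(all(test, feature = "futures"))]
#[test]
fn dummy_waker_allows_polling_without_an_executor() {
        use core::future::Future;
        let waker = dummy_waker();
        let mut ctx = core::task::Context::from_waker(&waker);
        // A future containing no awaits is ready on its first poll.
        let mut fut = Box::pin(async { true });
        match fut.as_mut().poll(&mut ctx) {
                core::task::Poll::Ready(val) => assert!(val),
                core::task::Poll::Pending => panic!("trivial future should complete immediately"),
        }
}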
462 #[cfg(feature = "futures")]
463 use futures_util::{Selector, SelectorOutput, dummy_waker};
464 #[cfg(feature = "futures")]
465 use core::task;
466
467 /// Processes background events in a future.
468 ///
469 /// `sleeper` should return a future which completes in the given amount of time and returns a
470 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
471 /// future which outputs true, the loop will exit and this function's future will complete.
472 ///
473 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
474 ///
475 /// Requires the `futures` feature. Note that while this method is available without the `std`
476 /// feature, using it that way will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
477 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
478 /// regularly yourself.
479 ///
480 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
481 /// mobile device, where we may need to check for interruption of the application regularly. If you
482 /// are unsure, you should set the flag, as its performance impact is minimal unless there
483 /// are hundreds or thousands of simultaneous process calls running.
484 #[cfg(feature = "futures")]
485 pub async fn process_events_async<
486         'a,
487         UL: 'static + Deref + Send + Sync,
488         CF: 'static + Deref + Send + Sync,
489         CW: 'static + Deref + Send + Sync,
490         T: 'static + Deref + Send + Sync,
491         ES: 'static + Deref + Send + Sync,
492         NS: 'static + Deref + Send + Sync,
493         SP: 'static + Deref + Send + Sync,
494         F: 'static + Deref + Send + Sync,
495         R: 'static + Deref + Send + Sync,
496         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
497         L: 'static + Deref + Send + Sync,
498         P: 'static + Deref + Send + Sync,
499         Descriptor: 'static + SocketDescriptor + Send + Sync,
500         CMH: 'static + Deref + Send + Sync,
501         RMH: 'static + Deref + Send + Sync,
502         OMH: 'static + Deref + Send + Sync,
503         EventHandlerFuture: core::future::Future<Output = ()>,
504         EventHandler: Fn(Event) -> EventHandlerFuture,
505         PS: 'static + Deref + Send,
506         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
507         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
508         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
509         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
510         UMH: 'static + Deref + Send + Sync,
511         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
512         S: 'static + Deref<Target = SC> + Send + Sync,
513         SC: for<'b> WriteableScore<'b>,
514         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
515         Sleeper: Fn(Duration) -> SleepFuture
516 >(
517         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
518         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
519         sleeper: Sleeper, mobile_interruptable_platform: bool,
520 ) -> Result<(), lightning::io::Error>
521 where
522         UL::Target: 'static + UtxoLookup,
523         CF::Target: 'static + chain::Filter,
524         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
525         T::Target: 'static + BroadcasterInterface,
526         ES::Target: 'static + EntropySource,
527         NS::Target: 'static + NodeSigner,
528         SP::Target: 'static + SignerProvider,
529         F::Target: 'static + FeeEstimator,
530         R::Target: 'static + Router,
531         L::Target: 'static + Logger,
532         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
533         CMH::Target: 'static + ChannelMessageHandler,
534         OMH::Target: 'static + OnionMessageHandler,
535         RMH::Target: 'static + RoutingMessageHandler,
536         UMH::Target: 'static + CustomMessageHandler,
537         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
538 {
539         let mut should_break = false;
540         let async_event_handler = |event| {
541                 let network_graph = gossip_sync.network_graph();
542                 let event_handler = &event_handler;
543                 let scorer = &scorer;
544                 async move {
545                         if let Some(network_graph) = network_graph {
546                                 handle_network_graph_update(network_graph, &event)
547                         }
548                         if let Some(ref scorer) = scorer {
549                                 update_scorer(scorer, &event);
550                         }
551                         event_handler(event).await;
552                 }
553         };
554         define_run_body!(persister,
555                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
556                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
557                 gossip_sync, peer_manager, logger, scorer, should_break, {
558                         let fut = Selector {
559                                 a: channel_manager.get_persistable_update_future(),
560                                 b: chain_monitor.get_update_future(),
561                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
562                         };
563                         match fut.await {
564                                 SelectorOutput::A => true,
565                                 SelectorOutput::B => false,
566                                 SelectorOutput::C(exit) => {
567                                         should_break = exit;
568                                         false
569                                 }
570                         }
571                 }, |t| sleeper(Duration::from_secs(t)),
572                 |fut: &mut SleepFuture, _| {
573                         let mut waker = dummy_waker();
574                         let mut ctx = task::Context::from_waker(&mut waker);
575                         match core::pin::Pin::new(fut).poll(&mut ctx) {
576                                 task::Poll::Ready(exit) => { should_break = exit; true },
577                                 task::Poll::Pending => false,
578                         }
579                 }, mobile_interruptable_platform)
580 }
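// A minimal sketch (test-only, hypothetical types) of the `sleeper` contract expected by
// `process_events_async` above: each call returns a future that completes after roughly the
// requested duration, yielding `true` once the application has requested shutdown. A real
// application would normally build this from its runtime's timer (e.g. a tokio sleep) rather
// than the self-waking poll loop used here to keep the sketch dependency-free.
#[cfg(all(test, feature = "futures"))]
#[allow(dead_code)]
mod sleeper_sketch {
        use core::future::Future;
        use core::pin::Pin;
        use core::task::{Context, Poll};
        use std::sync::Arc;
        use std::sync::atomic::{AtomicBool, Ordering};
        use std::time::{Duration, Instant};

        /// Hypothetical sleeper future: ready once `deadline` passes or `exit` is set.
        pub struct SketchSleeper {
                deadline: Instant,
                exit: Arc<AtomicBool>,
        }

        impl Future for SketchSleeper {
                type Output = bool;
                fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> {
                        if self.exit.load(Ordering::Acquire) { return Poll::Ready(true); }
                        if Instant::now() >= self.deadline { return Poll::Ready(false); }
                        // Re-wake immediately so the executor polls us again; a production sleeper
                        // should register a real timer instead of busy-polling.
                        cx.waker().wake_by_ref();
                        Poll::Pending
                }
        }

        /// Builds the `Fn(Duration) -> impl Future<Output = bool> + Unpin` closure shape that
        /// `process_events_async` takes as its `sleeper` argument.
        pub fn make_sleeper(exit: Arc<AtomicBool>) -> impl Fn(Duration) -> SketchSleeper {
                move |duration| SketchSleeper { deadline: Instant::now() + duration, exit: Arc::clone(&exit) }
        }
}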
581
582 #[cfg(feature = "std")]
583 impl BackgroundProcessor {
584         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
585         /// documentation].
586         ///
587         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
588         /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
589         /// either [`join`] or [`stop`].
590         ///
591         /// # Data Persistence
592         ///
593         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
594         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
595         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
596         /// provided implementation.
597         ///
598         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
599         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
600         /// See the `lightning-persister` crate for LDK's provided implementation.
601         ///
602         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
603         /// error or call [`join`] and handle any error that may arise. For the latter case,
604         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
605         ///
606         /// # Event Handling
607         ///
608         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
609         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
610         /// functionality implemented by other handlers.
611         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
612         ///
613         /// # Rapid Gossip Sync
614         ///
615         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
616         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
617         /// until the [`RapidGossipSync`] instance completes its first sync.
618         ///
619         /// [top-level documentation]: BackgroundProcessor
620         /// [`join`]: Self::join
621         /// [`stop`]: Self::stop
622         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
623         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
624         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
625         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
626         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
627         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
628         pub fn start<
629                 'a,
630                 UL: 'static + Deref + Send + Sync,
631                 CF: 'static + Deref + Send + Sync,
632                 CW: 'static + Deref + Send + Sync,
633                 T: 'static + Deref + Send + Sync,
634                 ES: 'static + Deref + Send + Sync,
635                 NS: 'static + Deref + Send + Sync,
636                 SP: 'static + Deref + Send + Sync,
637                 F: 'static + Deref + Send + Sync,
638                 R: 'static + Deref + Send + Sync,
639                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
640                 L: 'static + Deref + Send + Sync,
641                 P: 'static + Deref + Send + Sync,
642                 Descriptor: 'static + SocketDescriptor + Send + Sync,
643                 CMH: 'static + Deref + Send + Sync,
644                 OMH: 'static + Deref + Send + Sync,
645                 RMH: 'static + Deref + Send + Sync,
646                 EH: 'static + EventHandler + Send,
647                 PS: 'static + Deref + Send,
648                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
649                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
650                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
651                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
652                 UMH: 'static + Deref + Send + Sync,
653                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
654                 S: 'static + Deref<Target = SC> + Send + Sync,
655                 SC: for <'b> WriteableScore<'b>,
656         >(
657                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
658                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
659         ) -> Self
660         where
661                 UL::Target: 'static + UtxoLookup,
662                 CF::Target: 'static + chain::Filter,
663                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
664                 T::Target: 'static + BroadcasterInterface,
665                 ES::Target: 'static + EntropySource,
666                 NS::Target: 'static + NodeSigner,
667                 SP::Target: 'static + SignerProvider,
668                 F::Target: 'static + FeeEstimator,
669                 R::Target: 'static + Router,
670                 L::Target: 'static + Logger,
671                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
672                 CMH::Target: 'static + ChannelMessageHandler,
673                 OMH::Target: 'static + OnionMessageHandler,
674                 RMH::Target: 'static + RoutingMessageHandler,
675                 UMH::Target: 'static + CustomMessageHandler,
676                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
677         {
678                 let stop_thread = Arc::new(AtomicBool::new(false));
679                 let stop_thread_clone = stop_thread.clone();
680                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
681                         let event_handler = |event| {
682                                 let network_graph = gossip_sync.network_graph();
683                                 if let Some(network_graph) = network_graph {
684                                         handle_network_graph_update(network_graph, &event)
685                                 }
686                                 if let Some(ref scorer) = scorer {
687                                         update_scorer(scorer, &event);
688                                 }
689                                 event_handler.handle_event(event);
690                         };
691                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
692                                 channel_manager, channel_manager.process_pending_events(&event_handler),
693                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
694                                 Sleeper::from_two_futures(
695                                         channel_manager.get_persistable_update_future(),
696                                         chain_monitor.get_update_future()
697                                 ).wait_timeout(Duration::from_millis(100)),
698                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
699                 });
700                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
701         }
702
703         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
704         /// [`ChannelManager`].
705         ///
706         /// # Panics
707         ///
708         /// This function panics if the background thread has panicked such as while persisting or
709         /// handling events.
710         ///
711         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
712         pub fn join(mut self) -> Result<(), std::io::Error> {
713                 assert!(self.thread_handle.is_some());
714                 self.join_thread()
715         }
716
717         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
718         /// [`ChannelManager`].
719         ///
720         /// # Panics
721         ///
722         /// This function panics if the background thread has panicked such as while persisting or
723         /// handling events.
724         ///
725         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
726         pub fn stop(mut self) -> Result<(), std::io::Error> {
727                 assert!(self.thread_handle.is_some());
728                 self.stop_and_join_thread()
729         }
730
731         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
732                 self.stop_thread.store(true, Ordering::Release);
733                 self.join_thread()
734         }
735
736         fn join_thread(&mut self) -> Result<(), std::io::Error> {
737                 match self.thread_handle.take() {
738                         Some(handle) => handle.join().unwrap(),
739                         None => Ok(()),
740                 }
741         }
742 }
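// A minimal sketch (hypothetical helper, not part of this crate) of the restart-after-error
// pattern described in `start`'s docs: `join` blocks until the background thread exits and
// surfaces any persistence error, after which the caller may rebuild and restart the
// processor. `build` stands in for whatever application code assembles the many arguments
// `BackgroundProcessor::start` needs.
#[cfg(feature = "std")]
#[allow(dead_code)]
fn run_until_clean_shutdown_sketch<F: Fn() -> BackgroundProcessor>(build: F) {
        loop {
                let processor = build();
                match processor.join() {
                        // The background thread exited cleanly; stop looping.
                        Ok(()) => break,
                        // Persistence failed; handle the error, then start a fresh processor.
                        Err(e) => eprintln!("background processor errored, restarting: {}", e),
                }
        }
}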
743
744 #[cfg(feature = "std")]
745 impl Drop for BackgroundProcessor {
746         fn drop(&mut self) {
747                 self.stop_and_join_thread().unwrap();
748         }
749 }
750
751 #[cfg(all(feature = "std", test))]
752 mod tests {
753         use bitcoin::blockdata::block::BlockHeader;
754         use bitcoin::blockdata::constants::genesis_block;
755         use bitcoin::blockdata::locktime::PackedLockTime;
756         use bitcoin::blockdata::transaction::{Transaction, TxOut};
757         use bitcoin::network::constants::Network;
758         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
759         use lightning::chain::{BestBlock, Confirm, chainmonitor};
760         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
761         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
762         use lightning::chain::transaction::OutPoint;
763         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
764         use lightning::{get_event_msg, get_event};
765         use lightning::ln::PaymentHash;
766         use lightning::ln::channelmanager;
767         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
768         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
769         use lightning::ln::msgs::{ChannelMessageHandler, Init};
770         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
771         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
772         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
773         use lightning::routing::scoring::{ChannelUsage, Score};
774         use lightning::util::config::UserConfig;
775         use lightning::util::ser::Writeable;
776         use lightning::util::test_utils;
777         use lightning::util::persist::KVStorePersister;
778         use lightning_persister::FilesystemPersister;
779         use std::collections::VecDeque;
780         use std::fs;
781         use std::path::PathBuf;
782         use std::sync::{Arc, Mutex};
783         use std::sync::mpsc::SyncSender;
784         use std::time::Duration;
785         use bitcoin::hashes::Hash;
786         use bitcoin::TxMerkleNode;
787         use lightning_rapid_gossip_sync::RapidGossipSync;
788         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
789
790         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
791
792         #[derive(Clone, Hash, PartialEq, Eq)]
793         struct TestDescriptor{}
794         impl SocketDescriptor for TestDescriptor {
795                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
796                         0
797                 }
798
799                 fn disconnect_socket(&mut self) {}
800         }
801
802         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
803
804         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
805
806         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
807         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
808
809         struct Node {
810                 node: Arc<ChannelManager>,
811                 p2p_gossip_sync: PGS,
812                 rapid_gossip_sync: RGS,
813                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
814                 chain_monitor: Arc<ChainMonitor>,
815                 persister: Arc<FilesystemPersister>,
816                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
817                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
818                 logger: Arc<test_utils::TestLogger>,
819                 best_block: BestBlock,
820                 scorer: Arc<Mutex<TestScorer>>,
821         }
822
823         impl Node {
824                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
825                         GossipSync::P2P(self.p2p_gossip_sync.clone())
826                 }
827
828                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
829                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
830                 }
831
832                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
833                         GossipSync::None
834                 }
835         }
836
837         impl Drop for Node {
838                 fn drop(&mut self) {
839                         let data_dir = self.persister.get_data_dir();
840                         match fs::remove_dir_all(data_dir.clone()) {
841                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
842                                 _ => {}
843                         }
844                 }
845         }
846
847         struct Persister {
848                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
849                 graph_persistence_notifier: Option<SyncSender<()>>,
850                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
851                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
852                 filesystem_persister: FilesystemPersister,
853         }
854
855         impl Persister {
856                 fn new(data_dir: String) -> Self {
857                         let filesystem_persister = FilesystemPersister::new(data_dir);
858                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
859                 }
860
861                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
862                         Self { graph_error: Some((error, message)), ..self }
863                 }
864
865                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
866                         Self { graph_persistence_notifier: Some(sender), ..self }
867                 }
868
869                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
870                         Self { manager_error: Some((error, message)), ..self }
871                 }
872
873                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
874                         Self { scorer_error: Some((error, message)), ..self }
875                 }
876         }
877
878         impl KVStorePersister for Persister {
879                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
880                         if key == "manager" {
881                                 if let Some((error, message)) = self.manager_error {
882                                         return Err(std::io::Error::new(error, message))
883                                 }
884                         }
885
886                         if key == "network_graph" {
887                                 if let Some(sender) = &self.graph_persistence_notifier {
888                                         match sender.send(()) {
889                                                 Ok(()) => {},
890                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
891                                         }
892                                 };
893
894                                 if let Some((error, message)) = self.graph_error {
895                                         return Err(std::io::Error::new(error, message))
896                                 }
897                         }
898
899                         if key == "scorer" {
900                                 if let Some((error, message)) = self.scorer_error {
901                                         return Err(std::io::Error::new(error, message))
902                                 }
903                         }
904
905                         self.filesystem_persister.persist(key, object)
906                 }
907         }
908
909         struct TestScorer {
910                 event_expectations: Option<VecDeque<TestResult>>,
911         }
912
913         #[derive(Debug)]
914         enum TestResult {
915                 PaymentFailure { path: Path, short_channel_id: u64 },
916                 PaymentSuccess { path: Path },
917                 ProbeFailure { path: Path },
918                 ProbeSuccess { path: Path },
919         }
920
921         impl TestScorer {
922                 fn new() -> Self {
923                         Self { event_expectations: None }
924                 }
925
926                 fn expect(&mut self, expectation: TestResult) {
927                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
928                 }
929         }
930
931         impl lightning::util::ser::Writeable for TestScorer {
932                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
933         }
934
935         impl Score for TestScorer {
936                 fn channel_penalty_msat(
937                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
938                 ) -> u64 { unimplemented!(); }
939
940                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
941                         if let Some(expectations) = &mut self.event_expectations {
942                                 match expectations.pop_front().unwrap() {
943                                         TestResult::PaymentFailure { path, short_channel_id } => {
944                                                 assert_eq!(actual_path, &path);
945                                                 assert_eq!(actual_short_channel_id, short_channel_id);
946                                         },
947                                         TestResult::PaymentSuccess { path } => {
948                                                 panic!("Unexpected successful payment path: {:?}", path)
949                                         },
950                                         TestResult::ProbeFailure { path } => {
951                                                 panic!("Unexpected probe failure: {:?}", path)
952                                         },
953                                         TestResult::ProbeSuccess { path } => {
954                                                 panic!("Unexpected probe success: {:?}", path)
955                                         }
956                                 }
957                         }
958                 }
959
960                 fn payment_path_successful(&mut self, actual_path: &Path) {
961                         if let Some(expectations) = &mut self.event_expectations {
962                                 match expectations.pop_front().unwrap() {
963                                         TestResult::PaymentFailure { path, .. } => {
964                                                 panic!("Unexpected payment path failure: {:?}", path)
965                                         },
966                                         TestResult::PaymentSuccess { path } => {
967                                                 assert_eq!(actual_path, &path);
968                                         },
969                                         TestResult::ProbeFailure { path } => {
970                                                 panic!("Unexpected probe failure: {:?}", path)
971                                         },
972                                         TestResult::ProbeSuccess { path } => {
973                                                 panic!("Unexpected probe success: {:?}", path)
974                                         }
975                                 }
976                         }
977                 }
978
979                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
980                         if let Some(expectations) = &mut self.event_expectations {
981                                 match expectations.pop_front().unwrap() {
982                                         TestResult::PaymentFailure { path, .. } => {
983                                                 panic!("Unexpected payment path failure: {:?}", path)
984                                         },
985                                         TestResult::PaymentSuccess { path } => {
986                                                 panic!("Unexpected payment path success: {:?}", path)
987                                         },
988                                         TestResult::ProbeFailure { path } => {
989                                                 assert_eq!(actual_path, &path);
990                                         },
991                                         TestResult::ProbeSuccess { path } => {
992                                                 panic!("Unexpected probe success: {:?}", path)
993                                         }
994                                 }
995                         }
996                 }
997                 fn probe_successful(&mut self, actual_path: &Path) {
998                         if let Some(expectations) = &mut self.event_expectations {
999                                 match expectations.pop_front().unwrap() {
1000                                         TestResult::PaymentFailure { path, .. } => {
1001                                                 panic!("Unexpected payment path failure: {:?}", path)
1002                                         },
1003                                         TestResult::PaymentSuccess { path } => {
1004                                                 panic!("Unexpected payment path success: {:?}", path)
1005                                         },
1006                                         TestResult::ProbeFailure { path } => {
1007                                                 panic!("Unexpected probe failure: {:?}", path)
1008                                         },
1009                                         TestResult::ProbeSuccess { path } => {
1010                                                 assert_eq!(actual_path, &path);
1011                                         }
1012                                 }
1013                         }
1014                 }
1015         }
1016
1017         impl Drop for TestScorer {
1018                 fn drop(&mut self) {
1019                         if std::thread::panicking() {
1020                                 return;
1021                         }
1022
1023                         if let Some(event_expectations) = &self.event_expectations {
1024                                 if !event_expectations.is_empty() {
1025                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1026                                 }
1027                         }
1028                 }
1029         }
1030
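	// Test helper: joins the given directory and filename into a single path string.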
1031         fn get_full_filepath(filepath: String, filename: String) -> String {
1032                 let mut path = PathBuf::from(filepath);
1033                 path.push(filename);
1034                 path.to_str().unwrap().to_string()
1035         }
1036
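	// Builds `num_nodes` test nodes, each with its own ChannelManager, ChainMonitor, PeerManager,
	// gossip sync handlers and scorer, persisting under `persist_dir`, then connects every pair of
	// nodes as peers.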
1037         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1038                 let network = Network::Testnet;
1039                 let mut nodes = Vec::new();
1040                 for i in 0..num_nodes {
1041                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1042                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1043                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1044                         let genesis_block = genesis_block(network);
1045                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1046                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1047                         let seed = [i as u8; 32];
1048                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1049                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1050                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1051                         let now = Duration::from_secs(genesis_block.header.time as u64);
1052                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1053                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1054                         let best_block = BestBlock::from_network(network);
1055                         let params = ChainParameters { network, best_block };
1056                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1057                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1058                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1059                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1060                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1061                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1062                         nodes.push(node);
1063                 }
1064
1065                 for i in 0..num_nodes {
1066                         for j in (i+1)..num_nodes {
1067                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1068                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1069                         }
1070                 }
1071
1072                 nodes
1073         }
1074
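	// Drives a channel open between `$node_a` and `$node_b` through the funding handshake until
	// both nodes have seen `ChannelPending`, returning the funding transaction.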
1075         macro_rules! open_channel {
1076                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1077                         begin_open_channel!($node_a, $node_b, $channel_value);
1078                         let events = $node_a.node.get_and_clear_pending_events();
1079                         assert_eq!(events.len(), 1);
1080                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1081                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1082                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1083                         get_event!($node_b, Event::ChannelPending);
1084                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1085                         get_event!($node_a, Event::ChannelPending);
1086                         tx
1087                 }}
1088         }
1089
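	// Starts a channel open by exchanging `open_channel`/`accept_channel`, leaving `$node_a` with a
	// pending `FundingGenerationReady` event.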
1090         macro_rules! begin_open_channel {
1091                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1092                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1093                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1094                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1095                 }}
1096         }
1097
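	// Destructures a `FundingGenerationReady` event and builds a single-output funding transaction
	// paying the requested script, returning the temporary channel id and the transaction.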
1098         macro_rules! handle_funding_generation_ready {
1099                 ($event: expr, $channel_value: expr) => {{
1100                         match $event {
1101                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1102                                         assert_eq!(channel_value_satoshis, $channel_value);
1103                                         assert_eq!(user_channel_id, 42);
1104
1105					let tx = Transaction { version: 1, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1106                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1107                                         }]};
1108                                         (temporary_channel_id, tx)
1109                                 },
1110                                 _ => panic!("Unexpected event"),
1111                         }
1112                 }}
1113         }
1114
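	// Confirms `tx` and then connects blocks on top until it is `depth` blocks deep, notifying both
	// the ChannelManager and the ChainMonitor.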
1115         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1116                 for i in 1..=depth {
1117                         let prev_blockhash = node.best_block.block_hash();
1118                         let height = node.best_block.height() + 1;
1119                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1120                         let txdata = vec![(0, tx)];
1121                         node.best_block = BestBlock::new(header.block_hash(), height);
1122                         match i {
1123                                 1 => {
1124                                         node.node.transactions_confirmed(&header, &txdata, height);
1125                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1126                                 },
1127                                 x if x == depth => {
1128                                         node.node.best_block_updated(&header, height);
1129                                         node.chain_monitor.best_block_updated(&header, height);
1130                                 },
1131                                 _ => {},
1132                         }
1133                 }
1134         }
1135         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1136                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1137         }
1138
1139         #[test]
1140         fn test_background_processor() {
1141		// Test that when a new channel is created, the ChannelManager is re-persisted to include the
1142		// new channel. Also test that whenever further updates become available, the manager signals
1143		// that it needs re-persistence and is successfully re-persisted.
1144                 let nodes = create_nodes(2, "test_background_processor".to_string());
1145
1146                 // Go through the channel creation process so that each node has something to persist. Since
1147                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1148                 // avoid a race with processing events.
1149                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1150
1151		// Initiate the background processor to watch the first node.
1152                 let data_dir = nodes[0].persister.get_data_dir();
1153                 let persister = Arc::new(Persister::new(data_dir));
1154                 let event_handler = |_: _| {};
1155                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1156
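		// Busy-wait until the bytes persisted at `$filepath` match `$node`'s current serialization.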
1157                 macro_rules! check_persisted_data {
1158                         ($node: expr, $filepath: expr) => {
1159                                 let mut expected_bytes = Vec::new();
1160                                 loop {
1161                                         expected_bytes.clear();
1162                                         match $node.write(&mut expected_bytes) {
1163                                                 Ok(()) => {
1164                                                         match std::fs::read($filepath) {
1165                                                                 Ok(bytes) => {
1166                                                                         if bytes == expected_bytes {
1167                                                                                 break
1168                                                                         } else {
1169                                                                                 continue
1170                                                                         }
1171                                                                 },
1172                                                                 Err(_) => continue
1173                                                         }
1174                                                 },
1175                                                 Err(e) => panic!("Unexpected error: {}", e)
1176                                         }
1177                                 }
1178                         }
1179                 }
1180
1181                 // Check that the initial channel manager data is persisted as expected.
1182                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1183                 check_persisted_data!(nodes[0].node, filepath.clone());
1184
1185                 loop {
1186                         if !nodes[0].node.get_persistence_condvar_value() { break }
1187                 }
1188
1189                 // Force-close the channel.
1190                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1191
1192                 // Check that the force-close updates are persisted.
1193                 check_persisted_data!(nodes[0].node, filepath.clone());
1194                 loop {
1195                         if !nodes[0].node.get_persistence_condvar_value() { break }
1196                 }
1197
1198		// Check that the network graph is persisted.
1199                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1200                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1201
1202		// Check that the scorer is persisted.
1203                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1204                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1205
1206                 if !std::thread::panicking() {
1207                         bg_processor.stop().unwrap();
1208                 }
1209         }
1210
1211         #[test]
1212         fn test_timer_tick_called() {
1213                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1214                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1215                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1216                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1217                 let data_dir = nodes[0].persister.get_data_dir();
1218                 let persister = Arc::new(Persister::new(data_dir));
1219                 let event_handler = |_: _| {};
1220                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
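		// Wait until all three timer-driven log lines have been emitted at least once.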
1221                 loop {
1222                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1223                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1224                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1225                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1226                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1227                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1228                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1229                                 break
1230                         }
1231                 }
1232
1233                 if !std::thread::panicking() {
1234                         bg_processor.stop().unwrap();
1235                 }
1236         }
1237
1238         #[test]
1239         fn test_channel_manager_persist_error() {
1240		// Test that if we encounter an error during manager persistence, it is returned from `join`.
1241                 let nodes = create_nodes(2, "test_persist_error".to_string());
1242                 open_channel!(nodes[0], nodes[1], 100000);
1243
1244                 let data_dir = nodes[0].persister.get_data_dir();
1245                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1246                 let event_handler = |_: _| {};
1247                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1248                 match bg_processor.join() {
1249                         Ok(_) => panic!("Expected error persisting manager"),
1250                         Err(e) => {
1251                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1252                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1253                         },
1254                 }
1255         }
1256
1257         #[tokio::test]
1258         #[cfg(feature = "futures")]
1259         async fn test_channel_manager_persist_error_async() {
1260		// Test that if we encounter an error during manager persistence, the future resolves with that error.
1261		let nodes = create_nodes(2, "test_persist_error_async".to_string());
1262                 open_channel!(nodes[0], nodes[1], 100000);
1263
1264                 let data_dir = nodes[0].persister.get_data_dir();
1265                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1266
1267                 let bp_future = super::process_events_async(
1268                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1269                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1270                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1271                                 Box::pin(async move {
1272                                         tokio::time::sleep(dur).await;
1273                                         false // Never exit
1274                                 })
1275                         }, false,
1276                 );
1277                 match bp_future.await {
1278                         Ok(_) => panic!("Expected error persisting manager"),
1279                         Err(e) => {
1280                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1281                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1282                         },
1283                 }
1284         }
1285
1286         #[test]
1287         fn test_network_graph_persist_error() {
1288                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1289                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1290                 let data_dir = nodes[0].persister.get_data_dir();
1291                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1292                 let event_handler = |_: _| {};
1293                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1294
1295                 match bg_processor.stop() {
1296                         Ok(_) => panic!("Expected error persisting network graph"),
1297                         Err(e) => {
1298                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1299                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1300                         },
1301                 }
1302         }
1303
1304         #[test]
1305         fn test_scorer_persist_error() {
1306                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1307                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1308                 let data_dir = nodes[0].persister.get_data_dir();
1309                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1310                 let event_handler = |_: _| {};
1311		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1312
1313                 match bg_processor.stop() {
1314                         Ok(_) => panic!("Expected error persisting scorer"),
1315                         Err(e) => {
1316                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1317                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1318                         },
1319                 }
1320         }
1321
1322         #[test]
1323         fn test_background_event_handling() {
1324                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1325                 let channel_value = 100000;
1326                 let data_dir = nodes[0].persister.get_data_dir();
1327                 let persister = Arc::new(Persister::new(data_dir.clone()));
1328
1329		// Set up a background event handler for FundingGenerationReady and ChannelPending events.
1330                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1331                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1332                 let event_handler = move |event: Event| match event {
1333                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1334                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1335                         Event::ChannelReady { .. } => {},
1336                         _ => panic!("Unexpected event: {:?}", event),
1337                 };
1338
1339                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1340
1341                 // Open a channel and check that the FundingGenerationReady event was handled.
1342                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1343                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1344                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1345                         .expect("FundingGenerationReady not handled within deadline");
1346                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1347                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1348                 get_event!(nodes[1], Event::ChannelPending);
1349                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1350                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1351                         .expect("ChannelPending not handled within deadline");
1352
1353                 // Confirm the funding transaction.
1354                 confirm_transaction(&mut nodes[0], &funding_tx);
1355                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1356                 confirm_transaction(&mut nodes[1], &funding_tx);
1357                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1358                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1359                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1360                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1361                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1362
1363                 if !std::thread::panicking() {
1364                         bg_processor.stop().unwrap();
1365                 }
1366
1367                 // Set up a background event handler for SpendableOutputs events.
1368                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1369                 let event_handler = move |event: Event| match event {
1370                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1371                         Event::ChannelReady { .. } => {},
1372                         Event::ChannelClosed { .. } => {},
1373                         _ => panic!("Unexpected event: {:?}", event),
1374                 };
1375                 let persister = Arc::new(Persister::new(data_dir));
1376                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1377
1378                 // Force close the channel and check that the SpendableOutputs event was handled.
1379                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1380                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1381                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1382
1383                 let event = receiver
1384                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1385                         .expect("Events not handled within deadline");
1386                 match event {
1387                         Event::SpendableOutputs { .. } => {},
1388                         _ => panic!("Unexpected event: {:?}", event),
1389                 }
1390
1391                 if !std::thread::panicking() {
1392                         bg_processor.stop().unwrap();
1393                 }
1394         }
1395
1396         #[test]
1397         fn test_scorer_persistence() {
1398                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1399                 let data_dir = nodes[0].persister.get_data_dir();
1400                 let persister = Arc::new(Persister::new(data_dir));
1401                 let event_handler = |_: _| {};
1402                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1403
1404                 loop {
1405                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1406                         let expected_log = "Persisting scorer".to_string();
1407                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1408                                 break
1409                         }
1410                 }
1411
1412                 if !std::thread::panicking() {
1413                         bg_processor.stop().unwrap();
1414                 }
1415         }
1416
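	// Shared body for the sync and async variants below: seed the graph with a stub channel, wait
	// for the background processor to run a few timer ticks, apply a rapid gossip sync update, and
	// check that stale channels are only pruned once graph-sync completion has been signaled.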
1417         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1418                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1419                         let features = ChannelFeatures::empty();
1420                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1421                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1422                         ).expect("Failed to update channel from partial announcement");
1423                         let original_graph_description = $nodes[0].network_graph.to_string();
1424                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1425                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1426
1427                         loop {
1428                                 $sleep;
1429                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1430                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1431                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1432                                         .unwrap_or(&0) > 1
1433                                 {
1434                                         // Wait until the loop has gone around at least twice.
1435                                         break
1436                                 }
1437                         }
1438
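			// Raw rapid gossip sync data; per the comment and assertion below, applying it adds two
			// channels to the graph.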
1439                         let initialization_input = vec![
1440                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1441                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1442                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1443                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1444                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1445                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1446                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1447                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1448                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1449                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1450                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1451                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1452                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1453                         ];
1454                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1455
1456                         // this should have added two channels and pruned the previous one.
1457                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1458
1459                         $receive.expect("Network graph not pruned within deadline");
1460
1461                         // all channels should now be pruned
1462                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1463                 }
1464         }
1465
1466         #[test]
1467         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1468                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1469
1470                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1471                 let data_dir = nodes[0].persister.get_data_dir();
1472                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1473
1474                 let event_handler = |_: _| {};
1475                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1476
1477                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1478                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1479                         std::thread::sleep(Duration::from_millis(1)));
1480
1481                 background_processor.stop().unwrap();
1482         }
1483
1484         #[tokio::test]
1485         #[cfg(feature = "futures")]
1486         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1487                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1488
1489                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
1490                 let data_dir = nodes[0].persister.get_data_dir();
1491                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1492
1493                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1494                 let bp_future = super::process_events_async(
1495                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1496                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1497                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1498                                 let mut exit_receiver = exit_receiver.clone();
1499                                 Box::pin(async move {
1500                                         tokio::select! {
1501                                                 _ = tokio::time::sleep(dur) => false,
1502                                                 _ = exit_receiver.changed() => true,
1503                                         }
1504                                 })
1505                         }, false,
1506                 );
1507
1508                 let t1 = tokio::spawn(bp_future);
1509                 let t2 = tokio::spawn(async move {
1510                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1511                                 let mut i = 0;
1512                                 loop {
1513                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1514                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1515                                         assert!(i < 5);
1516                                         i += 1;
1517                                 }
1518                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1519                         exit_sender.send(()).unwrap();
1520                 });
1521                 let (r1, r2) = tokio::join!(t1, t2);
1522                 r1.unwrap().unwrap();
1523                 r2.unwrap()
1524         }
1525
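	// Shared body for the sync and async scoring tests; `$receive` blocks until the handled event
	// is observed by the test's event handler.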
1526         macro_rules! do_test_payment_path_scoring {
1527                 ($nodes: expr, $receive: expr) => {
1528			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
1529			// that the scorer is updated for payment path failures and successes as well as for probe
1530			// results (note that the channel must be public or else we won't score it).
1531			// An event handler forwarding these payment and probe events must be hooked up to a running
1532			// background processor so that `$receive` can observe them.
1533                         let scored_scid = 4242;
1534                         let secp_ctx = Secp256k1::new();
1535                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1536                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1537
1538                         let path = Path { hops: vec![RouteHop {
1539                                 pubkey: node_1_id,
1540                                 node_features: NodeFeatures::empty(),
1541                                 short_channel_id: scored_scid,
1542                                 channel_features: ChannelFeatures::empty(),
1543                                 fee_msat: 0,
1544                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1545                         }], blinded_tail: None };
1546
1547                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1548                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1549                                 payment_id: None,
1550                                 payment_hash: PaymentHash([42; 32]),
1551                                 payment_failed_permanently: false,
1552                                 failure: PathFailure::OnPath { network_update: None },
1553                                 path: path.clone(),
1554                                 short_channel_id: Some(scored_scid),
1555                         });
1556                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1557                         match event {
1558                                 Event::PaymentPathFailed { .. } => {},
1559                                 _ => panic!("Unexpected event"),
1560                         }
1561
1562                         // Ensure we'll score payments that were explicitly failed back by the destination as
1563                         // ProbeSuccess.
1564                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1565                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1566                                 payment_id: None,
1567                                 payment_hash: PaymentHash([42; 32]),
1568                                 payment_failed_permanently: true,
1569                                 failure: PathFailure::OnPath { network_update: None },
1570                                 path: path.clone(),
1571                                 short_channel_id: None,
1572                         });
1573                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1574                         match event {
1575                                 Event::PaymentPathFailed { .. } => {},
1576                                 _ => panic!("Unexpected event"),
1577                         }
1578
1579                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1580                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1581                                 payment_id: PaymentId([42; 32]),
1582                                 payment_hash: None,
1583                                 path: path.clone(),
1584                         });
1585                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1586                         match event {
1587                                 Event::PaymentPathSuccessful { .. } => {},
1588                                 _ => panic!("Unexpected event"),
1589                         }
1590
1591                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1592                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1593                                 payment_id: PaymentId([42; 32]),
1594                                 payment_hash: PaymentHash([42; 32]),
1595                                 path: path.clone(),
1596                         });
1597                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1598                         match event {
1599				Event::ProbeSuccessful { .. } => {},
1600                                 _ => panic!("Unexpected event"),
1601                         }
1602
1603                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1604                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1605                                 payment_id: PaymentId([42; 32]),
1606                                 payment_hash: PaymentHash([42; 32]),
1607                                 path,
1608                                 short_channel_id: Some(scored_scid),
1609                         });
1610                         let event = $receive.expect("ProbeFailure not handled within deadline");
1611                         match event {
1612                                 Event::ProbeFailed { .. } => {},
1613                                 _ => panic!("Unexpected event"),
1614                         }
1615                 }
1616         }
1617
1618         #[test]
1619         fn test_payment_path_scoring() {
1620                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1621                 let event_handler = move |event: Event| match event {
1622                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1623                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1624                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1625                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1626                         _ => panic!("Unexpected event: {:?}", event),
1627                 };
1628
1629                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1630                 let data_dir = nodes[0].persister.get_data_dir();
1631                 let persister = Arc::new(Persister::new(data_dir));
1632                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1633
1634                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1635
1636                 if !std::thread::panicking() {
1637                         bg_processor.stop().unwrap();
1638                 }
1639         }
1640
1641         #[tokio::test]
1642         #[cfg(feature = "futures")]
1643         async fn test_payment_path_scoring_async() {
1644                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1645                 let event_handler = move |event: Event| {
1646                         let sender_ref = sender.clone();
1647                         async move {
1648                                 match event {
1649                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1650                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1651                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1652                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1653                                         _ => panic!("Unexpected event: {:?}", event),
1654                                 }
1655                         }
1656                 };
1657
1658                 let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
1659                 let data_dir = nodes[0].persister.get_data_dir();
1660                 let persister = Arc::new(Persister::new(data_dir));
1661
1662                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1663
1664                 let bp_future = super::process_events_async(
1665                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1666                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1667                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1668                                 let mut exit_receiver = exit_receiver.clone();
1669                                 Box::pin(async move {
1670                                         tokio::select! {
1671                                                 _ = tokio::time::sleep(dur) => false,
1672                                                 _ = exit_receiver.changed() => true,
1673                                         }
1674                                 })
1675                         }, false,
1676                 );
1677                 let t1 = tokio::spawn(bp_future);
1678                 let t2 = tokio::spawn(async move {
1679                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1680                         exit_sender.send(()).unwrap();
1681                 });
1682
1683                 let (r1, r2) = tokio::join!(t1, t2);
1684                 r1.unwrap().unwrap();
1685                 r2.unwrap()
1686         }
1687 }