Merge pull request #2146 from valentinewallace/2023-03-blinded-pathfinding-groundwork
lightning-background-processor/src/lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
68 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon, as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take a lot longer without compiler optimisations.
99 /// Increasing the ping timer allows for this, but slower devices will still be disconnected if
100 /// the timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(not(test))]
120 const REBROADCAST_TIMER: u64 = 30;
121 #[cfg(test)]
122 const REBROADCAST_TIMER: u64 = 1;
123
124 #[cfg(feature = "futures")]
125 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
126 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
127 #[cfg(feature = "futures")]
128 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
129         min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
130
131 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
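///
/// A minimal sketch of picking a variant via the helper constructors defined further below
/// (`p2p_sync` and `rapid_sync` are assumed to be `Arc`s of the corresponding sync objects built
/// elsewhere):
///
/// ```ignore
/// // Sync gossip over the lightning peer-to-peer network:
/// let gossip = GossipSync::p2p(Arc::clone(&p2p_sync));
/// // ...or via a rapid gossip sync server:
/// // let gossip = GossipSync::rapid(Arc::clone(&rapid_sync));
/// // ...or run without any gossip sync:
/// // let gossip = GossipSync::none();
/// ```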
132 pub enum GossipSync<
133         P: Deref<Target = P2PGossipSync<G, U, L>>,
134         R: Deref<Target = RapidGossipSync<G, L>>,
135         G: Deref<Target = NetworkGraph<L>>,
136         U: Deref,
137         L: Deref,
138 >
139 where U::Target: UtxoLookup, L::Target: Logger {
140         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
141         P2P(P),
142         /// Rapid gossip sync from a trusted server.
143         Rapid(R),
144         /// No gossip sync.
145         None,
146 }
147
148 impl<
149         P: Deref<Target = P2PGossipSync<G, U, L>>,
150         R: Deref<Target = RapidGossipSync<G, L>>,
151         G: Deref<Target = NetworkGraph<L>>,
152         U: Deref,
153         L: Deref,
154 > GossipSync<P, R, G, U, L>
155 where U::Target: UtxoLookup, L::Target: Logger {
156         fn network_graph(&self) -> Option<&G> {
157                 match self {
158                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
159                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::None => None,
161                 }
162         }
163
164         fn prunable_network_graph(&self) -> Option<&G> {
165                 match self {
166                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
167                         GossipSync::Rapid(gossip_sync) => {
168                                 if gossip_sync.is_initial_sync_complete() {
169                                         Some(gossip_sync.network_graph())
170                                 } else {
171                                         None
172                                 }
173                         },
174                         GossipSync::None => None,
175                 }
176         }
177 }
178
179 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
180 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
181         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
182 where
183         U::Target: UtxoLookup,
184         L::Target: Logger,
185 {
186         /// Initializes a new [`GossipSync::P2P`] variant.
187         pub fn p2p(gossip_sync: P) -> Self {
188                 GossipSync::P2P(gossip_sync)
189         }
190 }
191
192 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
193 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
194         GossipSync<
195                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
196                 R,
197                 G,
198                 &'a (dyn UtxoLookup + Send + Sync),
199                 L,
200         >
201 where
202         L::Target: Logger,
203 {
204         /// Initializes a new [`GossipSync::Rapid`] variant.
205         pub fn rapid(gossip_sync: R) -> Self {
206                 GossipSync::Rapid(gossip_sync)
207         }
208 }
209
210 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
211 impl<'a, L: Deref>
212         GossipSync<
213                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
214                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
215                 &'a NetworkGraph<L>,
216                 &'a (dyn UtxoLookup + Send + Sync),
217                 L,
218         >
219 where
220         L::Target: Logger,
221 {
222         /// Initializes a new [`GossipSync::None`] variant.
223         pub fn none() -> Self {
224                 GossipSync::None
225         }
226 }
227
228 fn handle_network_graph_update<L: Deref>(
229         network_graph: &NetworkGraph<L>, event: &Event
230 ) where L::Target: Logger {
231         if let Event::PaymentPathFailed {
232                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
233         {
234                 network_graph.handle_network_update(upd);
235         }
236 }
237
238 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
239         scorer: &'a S, event: &Event
240 ) {
241         let mut score = scorer.lock();
242         match event {
243                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
244                         score.payment_path_failed(path, *scid);
245                 },
246                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
247                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
248                         // because the payment made it all the way to the destination with sufficient liquidity.
249                         score.probe_successful(path);
250                 },
251                 Event::PaymentPathSuccessful { path, .. } => {
252                         score.payment_path_successful(path);
253                 },
254                 Event::ProbeSuccessful { path, .. } => {
255                         score.probe_successful(path);
256                 },
257                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
258                         score.probe_failed(path, *scid);
259                 },
260                 _ => {},
261         }
262 }
263
264 macro_rules! define_run_body {
265         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
266          $channel_manager: ident, $process_channel_manager_events: expr,
267          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
268          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
269          $check_slow_await: expr)
270         => { {
271                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
272                 $channel_manager.timer_tick_occurred();
273                 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
274                 $chain_monitor.rebroadcast_pending_claims();
275
276                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
277                 let mut last_ping_call = $get_timer(PING_TIMER);
278                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
279                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
280                 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
281                 let mut have_pruned = false;
282
283                 loop {
284                         $process_channel_manager_events;
285                         $process_chain_monitor_events;
286
287                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
288                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
289                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
290                         // without running the normal event processing above and handing events to users.
291                         //
292                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
293                         // processing a message effectively at any point during this loop. In order to
294                         // minimize the time between such processing completing and persisting the updated
295                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
296                         // generally, and as a fallback place such blocking only immediately before
297                         // persistence.
298                         $peer_manager.process_events();
299
300                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
301                         // see `await_start`'s use below.
302                         let mut await_start = None;
303                         if $check_slow_await { await_start = Some($get_timer(1)); }
304                         let updates_available = $await;
305                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
306
307                         if updates_available {
308                                 log_trace!($logger, "Persisting ChannelManager...");
309                                 $persister.persist_manager(&*$channel_manager)?;
310                                 log_trace!($logger, "Done persisting ChannelManager.");
311                         }
312                         // Exit the loop if the background processor was requested to stop.
313                         if $loop_exit_check {
314                                 log_trace!($logger, "Terminating background processor.");
315                                 break;
316                         }
317                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
318                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
319                                 $channel_manager.timer_tick_occurred();
320                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
321                         }
322                         if await_slow {
323                                 // On various platforms, we may be starved of CPU cycles for several reasons.
324                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
325                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
326                                 // may not get any cycles.
327                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
328                                 // full second, at which point we assume sockets may have been killed (they
329                                 // appear to be at least on some platforms, even if it has only been a second).
330                                 // Note that we have to take care to not get here just because user event
331                                 // processing was slow at the top of the loop. For example, the sample client
332                                 // may call Bitcoin Core RPCs during event handling, which very often takes
333                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
334                                 // peers.
335                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
336                                 $peer_manager.disconnect_all_peers();
337                                 last_ping_call = $get_timer(PING_TIMER);
338                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
339                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
340                                 $peer_manager.timer_tick_occurred();
341                                 last_ping_call = $get_timer(PING_TIMER);
342                         }
343
344                         // Note that we want to run a graph prune once not long after startup before
345                         // falling back to our usual hourly prunes. This avoids short-lived clients never
346                         // pruning their network graph. We run once 60 seconds after startup before
347                         // continuing our normal cadence.
348                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
349                         if $timer_elapsed(&mut last_prune_call, prune_timer) {
350                                 // The network graph must not be pruned while rapid sync completion is pending
351                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
352                                         #[cfg(feature = "std")] {
353                                                 log_trace!($logger, "Pruning and persisting network graph.");
354                                                 network_graph.remove_stale_channels_and_tracking();
355                                         }
356                                         #[cfg(not(feature = "std"))] {
357                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
358                                                 log_trace!($logger, "Persisting network graph.");
359                                         }
360
361                                         if let Err(e) = $persister.persist_graph(network_graph) {
362                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
363                                         }
364
365                                         have_pruned = true;
366                                 }
367                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
368                                 last_prune_call = $get_timer(prune_timer);
369                         }
370
371                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
372                                 if let Some(ref scorer) = $scorer {
373                                         log_trace!($logger, "Persisting scorer");
374                                         if let Err(e) = $persister.persist_scorer(&scorer) {
375                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
376                                         }
377                                 }
378                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
379                         }
380
381                         if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
382                                 log_trace!($logger, "Rebroadcasting monitor's pending claims");
383                                 $chain_monitor.rebroadcast_pending_claims();
384                                 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
385                         }
386                 }
387
388                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
389                 // some races where users quit while channel updates were in-flight, with
390                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
391                 $persister.persist_manager(&*$channel_manager)?;
392
393                 // Persist Scorer on exit
394                 if let Some(ref scorer) = $scorer {
395                         $persister.persist_scorer(&scorer)?;
396                 }
397
398                 // Persist NetworkGraph on exit
399                 if let Some(network_graph) = $gossip_sync.network_graph() {
400                         $persister.persist_graph(network_graph)?;
401                 }
402
403                 Ok(())
404         } }
405 }
406
407 #[cfg(feature = "futures")]
408 pub(crate) mod futures_util {
409         use core::future::Future;
410         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
411         use core::pin::Pin;
412         use core::marker::Unpin;
413         pub(crate) struct Selector<
414                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
415         > {
416                 pub a: A,
417                 pub b: B,
418                 pub c: C,
419         }
420         pub(crate) enum SelectorOutput {
421                 A, B, C(bool),
422         }
423
424         impl<
425                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
426         > Future for Selector<A, B, C> {
427                 type Output = SelectorOutput;
428                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
429                         match Pin::new(&mut self.a).poll(ctx) {
430                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
431                                 Poll::Pending => {},
432                         }
433                         match Pin::new(&mut self.b).poll(ctx) {
434                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
435                                 Poll::Pending => {},
436                         }
437                         match Pin::new(&mut self.c).poll(ctx) {
438                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
439                                 Poll::Pending => {},
440                         }
441                         Poll::Pending
442                 }
443         }
444
445         // If we want to poll a future without an async context to figure out if it has completed or
446         // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
447         // but sadly there's a good bit of boilerplate here.
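        // A hedged usage sketch (mirroring the polling done in `process_events_async` below):
        // build a Context from the dummy waker and poll a future exactly once to learn whether it
        // has already completed, without blocking or registering a real wakeup. Here `sleep_fut`
        // stands in for the sleeper future that gets polled this way.
        //
        //     let waker = dummy_waker();
        //     let mut ctx = core::task::Context::from_waker(&waker);
        //     let finished = matches!(core::pin::Pin::new(&mut sleep_fut).poll(&mut ctx), core::task::Poll::Ready(_));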
448         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
449         fn dummy_waker_action(_: *const ()) { }
450
451         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
452                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
453         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
454 }
455 #[cfg(feature = "futures")]
456 use futures_util::{Selector, SelectorOutput, dummy_waker};
457 #[cfg(feature = "futures")]
458 use core::task;
459
460 /// Processes background events in a future.
461 ///
462 /// `sleeper` should return a future which completes in the given amount of time and returns a
463 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
464 /// future which outputs true, the loop will exit and this function's future will complete.
465 ///
466 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
467 ///
468 /// Requires the `futures` feature. Note that while this method is available without the `std`
469 /// feature, using it without `std` will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
470 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
471 /// regularly yourself.
472 ///
473 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
474 /// mobile device, where we may need to check for interruption of the application regularly. If you
475 /// are unsure, you should set the flag, as its performance impact is minimal unless there
476 /// are hundreds or thousands of simultaneous process calls running.
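///
/// A minimal usage sketch under Tokio; `persister`, `event_handler`, `chain_monitor`,
/// `channel_manager`, `gossip_sync`, `peer_manager`, `logger` and `scorer` are placeholders for
/// objects built elsewhere in your application, and `should_exit()` is a hypothetical shutdown
/// check:
///
/// ```ignore
/// let bp_future = process_events_async(
///     persister, event_handler, chain_monitor, channel_manager, gossip_sync, peer_manager,
///     logger, Some(scorer),
///     |duration| {
///         Box::pin(async move {
///             tokio::time::sleep(duration).await;
///             // Return true once the application wants background processing to stop.
///             should_exit()
///         })
///     },
///     /* mobile_interruptable_platform= */ false,
/// );
/// tokio::spawn(async move {
///     if let Err(e) = bp_future.await {
///         eprintln!("Background processing stopped with error: {:?}", e);
///     }
/// });
/// ```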
477 #[cfg(feature = "futures")]
478 pub async fn process_events_async<
479         'a,
480         UL: 'static + Deref + Send + Sync,
481         CF: 'static + Deref + Send + Sync,
482         CW: 'static + Deref + Send + Sync,
483         T: 'static + Deref + Send + Sync,
484         ES: 'static + Deref + Send + Sync,
485         NS: 'static + Deref + Send + Sync,
486         SP: 'static + Deref + Send + Sync,
487         F: 'static + Deref + Send + Sync,
488         R: 'static + Deref + Send + Sync,
489         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
490         L: 'static + Deref + Send + Sync,
491         P: 'static + Deref + Send + Sync,
492         Descriptor: 'static + SocketDescriptor + Send + Sync,
493         CMH: 'static + Deref + Send + Sync,
494         RMH: 'static + Deref + Send + Sync,
495         OMH: 'static + Deref + Send + Sync,
496         EventHandlerFuture: core::future::Future<Output = ()>,
497         EventHandler: Fn(Event) -> EventHandlerFuture,
498         PS: 'static + Deref + Send,
499         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
500         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
501         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
502         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
503         UMH: 'static + Deref + Send + Sync,
504         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
505         S: 'static + Deref<Target = SC> + Send + Sync,
506         SC: for<'b> WriteableScore<'b>,
507         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
508         Sleeper: Fn(Duration) -> SleepFuture
509 >(
510         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
511         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
512         sleeper: Sleeper, mobile_interruptable_platform: bool,
513 ) -> Result<(), lightning::io::Error>
514 where
515         UL::Target: 'static + UtxoLookup,
516         CF::Target: 'static + chain::Filter,
517         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
518         T::Target: 'static + BroadcasterInterface,
519         ES::Target: 'static + EntropySource,
520         NS::Target: 'static + NodeSigner,
521         SP::Target: 'static + SignerProvider,
522         F::Target: 'static + FeeEstimator,
523         R::Target: 'static + Router,
524         L::Target: 'static + Logger,
525         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
526         CMH::Target: 'static + ChannelMessageHandler,
527         OMH::Target: 'static + OnionMessageHandler,
528         RMH::Target: 'static + RoutingMessageHandler,
529         UMH::Target: 'static + CustomMessageHandler,
530         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
531 {
532         let mut should_break = false;
533         let async_event_handler = |event| {
534                 let network_graph = gossip_sync.network_graph();
535                 let event_handler = &event_handler;
536                 let scorer = &scorer;
537                 async move {
538                         if let Some(network_graph) = network_graph {
539                                 handle_network_graph_update(network_graph, &event)
540                         }
541                         if let Some(ref scorer) = scorer {
542                                 update_scorer(scorer, &event);
543                         }
544                         event_handler(event).await;
545                 }
546         };
547         define_run_body!(persister,
548                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
549                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
550                 gossip_sync, peer_manager, logger, scorer, should_break, {
551                         let fut = Selector {
552                                 a: channel_manager.get_persistable_update_future(),
553                                 b: chain_monitor.get_update_future(),
554                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
555                         };
556                         match fut.await {
557                                 SelectorOutput::A => true,
558                                 SelectorOutput::B => false,
559                                 SelectorOutput::C(exit) => {
560                                         should_break = exit;
561                                         false
562                                 }
563                         }
564                 }, |t| sleeper(Duration::from_secs(t)),
565                 |fut: &mut SleepFuture, _| {
566                         let mut waker = dummy_waker();
567                         let mut ctx = task::Context::from_waker(&mut waker);
568                         match core::pin::Pin::new(fut).poll(&mut ctx) {
569                                 task::Poll::Ready(exit) => { should_break = exit; true },
570                                 task::Poll::Pending => false,
571                         }
572                 }, mobile_interruptable_platform)
573 }
574
575 #[cfg(feature = "std")]
576 impl BackgroundProcessor {
577         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
578         /// documentation].
579         ///
580         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
581         /// [`Persister::persist_manager`] returns an error. In case of an error, the error can be retrieved by calling
582         /// either [`join`] or [`stop`].
583         ///
584         /// # Data Persistence
585         ///
586         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
587         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
588         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
589         /// provided implementation.
590         ///
591         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
592         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
593         /// See the `lightning-persister` crate for LDK's provided implementation.
594         ///
595         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
596         /// error or call [`join`] and handle any error that may arise. For the latter case,
597         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
598         ///
599         /// # Event Handling
600         ///
601         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
602         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
603         /// functionality implemented by other handlers.
604         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
605         ///
606         /// # Rapid Gossip Sync
607         ///
608         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
609         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
610         /// until the [`RapidGossipSync`] instance completes its first sync.
611         ///
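        /// # Example
        ///
        /// A minimal sketch of starting and later shutting down the processor; every argument is a
        /// placeholder for an object constructed elsewhere in your application:
        ///
        /// ```ignore
        /// let background_processor = BackgroundProcessor::start(
        ///     persister, event_handler, chain_monitor, channel_manager, gossip_sync,
        ///     peer_manager, logger, Some(scorer),
        /// );
        /// // ... run the node ...
        /// // On shutdown, stop the background thread and surface any persistence error:
        /// background_processor.stop().expect("failed to persist during shutdown");
        /// ```
        ///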
612         /// [top-level documentation]: BackgroundProcessor
613         /// [`join`]: Self::join
614         /// [`stop`]: Self::stop
615         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
616         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
617         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
618         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
619         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
620         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
621         pub fn start<
622                 'a,
623                 UL: 'static + Deref + Send + Sync,
624                 CF: 'static + Deref + Send + Sync,
625                 CW: 'static + Deref + Send + Sync,
626                 T: 'static + Deref + Send + Sync,
627                 ES: 'static + Deref + Send + Sync,
628                 NS: 'static + Deref + Send + Sync,
629                 SP: 'static + Deref + Send + Sync,
630                 F: 'static + Deref + Send + Sync,
631                 R: 'static + Deref + Send + Sync,
632                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
633                 L: 'static + Deref + Send + Sync,
634                 P: 'static + Deref + Send + Sync,
635                 Descriptor: 'static + SocketDescriptor + Send + Sync,
636                 CMH: 'static + Deref + Send + Sync,
637                 OMH: 'static + Deref + Send + Sync,
638                 RMH: 'static + Deref + Send + Sync,
639                 EH: 'static + EventHandler + Send,
640                 PS: 'static + Deref + Send,
641                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
642                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
643                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
644                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
645                 UMH: 'static + Deref + Send + Sync,
646                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
647                 S: 'static + Deref<Target = SC> + Send + Sync,
648                 SC: for <'b> WriteableScore<'b>,
649         >(
650                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
651                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
652         ) -> Self
653         where
654                 UL::Target: 'static + UtxoLookup,
655                 CF::Target: 'static + chain::Filter,
656                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
657                 T::Target: 'static + BroadcasterInterface,
658                 ES::Target: 'static + EntropySource,
659                 NS::Target: 'static + NodeSigner,
660                 SP::Target: 'static + SignerProvider,
661                 F::Target: 'static + FeeEstimator,
662                 R::Target: 'static + Router,
663                 L::Target: 'static + Logger,
664                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
665                 CMH::Target: 'static + ChannelMessageHandler,
666                 OMH::Target: 'static + OnionMessageHandler,
667                 RMH::Target: 'static + RoutingMessageHandler,
668                 UMH::Target: 'static + CustomMessageHandler,
669                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
670         {
671                 let stop_thread = Arc::new(AtomicBool::new(false));
672                 let stop_thread_clone = stop_thread.clone();
673                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
674                         let event_handler = |event| {
675                                 let network_graph = gossip_sync.network_graph();
676                                 if let Some(network_graph) = network_graph {
677                                         handle_network_graph_update(network_graph, &event)
678                                 }
679                                 if let Some(ref scorer) = scorer {
680                                         update_scorer(scorer, &event);
681                                 }
682                                 event_handler.handle_event(event);
683                         };
684                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
685                                 channel_manager, channel_manager.process_pending_events(&event_handler),
686                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
687                                 Sleeper::from_two_futures(
688                                         channel_manager.get_persistable_update_future(),
689                                         chain_monitor.get_update_future()
690                                 ).wait_timeout(Duration::from_millis(100)),
691                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
692                 });
693                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
694         }
695
696         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
697         /// [`ChannelManager`].
698         ///
699         /// # Panics
700         ///
701         /// This function panics if the background thread has panicked such as while persisting or
702         /// handling events.
703         ///
704         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
705         pub fn join(mut self) -> Result<(), std::io::Error> {
706                 assert!(self.thread_handle.is_some());
707                 self.join_thread()
708         }
709
710         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
711         /// [`ChannelManager`].
712         ///
713         /// # Panics
714         ///
715         /// This function panics if the background thread has panicked such as while persisting or
716         /// handling events.
717         ///
718         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
719         pub fn stop(mut self) -> Result<(), std::io::Error> {
720                 assert!(self.thread_handle.is_some());
721                 self.stop_and_join_thread()
722         }
723
724         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
725                 self.stop_thread.store(true, Ordering::Release);
726                 self.join_thread()
727         }
728
729         fn join_thread(&mut self) -> Result<(), std::io::Error> {
730                 match self.thread_handle.take() {
731                         Some(handle) => handle.join().unwrap(),
732                         None => Ok(()),
733                 }
734         }
735 }
736
737 #[cfg(feature = "std")]
738 impl Drop for BackgroundProcessor {
739         fn drop(&mut self) {
740                 self.stop_and_join_thread().unwrap();
741         }
742 }
743
744 #[cfg(all(feature = "std", test))]
745 mod tests {
746         use bitcoin::blockdata::block::BlockHeader;
747         use bitcoin::blockdata::constants::genesis_block;
748         use bitcoin::blockdata::locktime::PackedLockTime;
749         use bitcoin::blockdata::transaction::{Transaction, TxOut};
750         use bitcoin::network::constants::Network;
751         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
752         use lightning::chain::{BestBlock, Confirm, chainmonitor};
753         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
754         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
755         use lightning::chain::transaction::OutPoint;
756         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
757         use lightning::{get_event_msg, get_event};
758         use lightning::ln::PaymentHash;
759         use lightning::ln::channelmanager;
760         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
761         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
762         use lightning::ln::msgs::{ChannelMessageHandler, Init};
763         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
764         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
765         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
766         use lightning::routing::scoring::{ChannelUsage, Score};
767         use lightning::util::config::UserConfig;
768         use lightning::util::ser::Writeable;
769         use lightning::util::test_utils;
770         use lightning::util::persist::KVStorePersister;
771         use lightning_persister::FilesystemPersister;
772         use std::collections::VecDeque;
773         use std::fs;
774         use std::path::PathBuf;
775         use std::sync::{Arc, Mutex};
776         use std::sync::mpsc::SyncSender;
777         use std::time::Duration;
778         use bitcoin::hashes::Hash;
779         use bitcoin::TxMerkleNode;
780         use lightning_rapid_gossip_sync::RapidGossipSync;
781         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
782
783         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
784
785         #[derive(Clone, Hash, PartialEq, Eq)]
786         struct TestDescriptor{}
787         impl SocketDescriptor for TestDescriptor {
788                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
789                         0
790                 }
791
792                 fn disconnect_socket(&mut self) {}
793         }
794
795         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
796
797         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
798
799         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
800         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
801
802         struct Node {
803                 node: Arc<ChannelManager>,
804                 p2p_gossip_sync: PGS,
805                 rapid_gossip_sync: RGS,
806                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
807                 chain_monitor: Arc<ChainMonitor>,
808                 persister: Arc<FilesystemPersister>,
809                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
810                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
811                 logger: Arc<test_utils::TestLogger>,
812                 best_block: BestBlock,
813                 scorer: Arc<Mutex<TestScorer>>,
814         }
815
816         impl Node {
817                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
818                         GossipSync::P2P(self.p2p_gossip_sync.clone())
819                 }
820
821                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
822                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
823                 }
824
825                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
826                         GossipSync::None
827                 }
828         }
829
830         impl Drop for Node {
831                 fn drop(&mut self) {
832                         let data_dir = self.persister.get_data_dir();
833                         match fs::remove_dir_all(data_dir.clone()) {
834                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
835                                 _ => {}
836                         }
837                 }
838         }
839
840         struct Persister {
841                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
842                 graph_persistence_notifier: Option<SyncSender<()>>,
843                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
844                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
845                 filesystem_persister: FilesystemPersister,
846         }
847
848         impl Persister {
849                 fn new(data_dir: String) -> Self {
850                         let filesystem_persister = FilesystemPersister::new(data_dir);
851                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
852                 }
853
854                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
855                         Self { graph_error: Some((error, message)), ..self }
856                 }
857
858                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
859                         Self { graph_persistence_notifier: Some(sender), ..self }
860                 }
861
862                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
863                         Self { manager_error: Some((error, message)), ..self }
864                 }
865
866                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
867                         Self { scorer_error: Some((error, message)), ..self }
868                 }
869         }
870
871         impl KVStorePersister for Persister {
872                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
873                         if key == "manager" {
874                                 if let Some((error, message)) = self.manager_error {
875                                         return Err(std::io::Error::new(error, message))
876                                 }
877                         }
878
879                         if key == "network_graph" {
880                                 if let Some(sender) = &self.graph_persistence_notifier {
881                                         match sender.send(()) {
882                                                 Ok(()) => {},
883                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
884                                         }
885                                 };
886
887                                 if let Some((error, message)) = self.graph_error {
888                                         return Err(std::io::Error::new(error, message))
889                                 }
890                         }
891
892                         if key == "scorer" {
893                                 if let Some((error, message)) = self.scorer_error {
894                                         return Err(std::io::Error::new(error, message))
895                                 }
896                         }
897
898                         self.filesystem_persister.persist(key, object)
899                 }
900         }
901
902         struct TestScorer {
903                 event_expectations: Option<VecDeque<TestResult>>,
904         }
905
906         #[derive(Debug)]
907         enum TestResult {
908                 PaymentFailure { path: Path, short_channel_id: u64 },
909                 PaymentSuccess { path: Path },
910                 ProbeFailure { path: Path },
911                 ProbeSuccess { path: Path },
912         }
913
914         impl TestScorer {
915                 fn new() -> Self {
916                         Self { event_expectations: None }
917                 }
918
919                 fn expect(&mut self, expectation: TestResult) {
920                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
921                 }
922         }
923
924         impl lightning::util::ser::Writeable for TestScorer {
925                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
926         }
927
928         impl Score for TestScorer {
929                 fn channel_penalty_msat(
930                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
931                 ) -> u64 { unimplemented!(); }
932
933                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
934                         if let Some(expectations) = &mut self.event_expectations {
935                                 match expectations.pop_front().unwrap() {
936                                         TestResult::PaymentFailure { path, short_channel_id } => {
937                                                 assert_eq!(actual_path, &path);
938                                                 assert_eq!(actual_short_channel_id, short_channel_id);
939                                         },
940                                         TestResult::PaymentSuccess { path } => {
941                                                 panic!("Unexpected successful payment path: {:?}", path)
942                                         },
943                                         TestResult::ProbeFailure { path } => {
944                                                 panic!("Unexpected probe failure: {:?}", path)
945                                         },
946                                         TestResult::ProbeSuccess { path } => {
947                                                 panic!("Unexpected probe success: {:?}", path)
948                                         }
949                                 }
950                         }
951                 }
952
953                 fn payment_path_successful(&mut self, actual_path: &Path) {
954                         if let Some(expectations) = &mut self.event_expectations {
955                                 match expectations.pop_front().unwrap() {
956                                         TestResult::PaymentFailure { path, .. } => {
957                                                 panic!("Unexpected payment path failure: {:?}", path)
958                                         },
959                                         TestResult::PaymentSuccess { path } => {
960                                                 assert_eq!(actual_path, &path);
961                                         },
962                                         TestResult::ProbeFailure { path } => {
963                                                 panic!("Unexpected probe failure: {:?}", path)
964                                         },
965                                         TestResult::ProbeSuccess { path } => {
966                                                 panic!("Unexpected probe success: {:?}", path)
967                                         }
968                                 }
969                         }
970                 }
971
972                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
973                         if let Some(expectations) = &mut self.event_expectations {
974                                 match expectations.pop_front().unwrap() {
975                                         TestResult::PaymentFailure { path, .. } => {
976                                                 panic!("Unexpected payment path failure: {:?}", path)
977                                         },
978                                         TestResult::PaymentSuccess { path } => {
979                                                 panic!("Unexpected payment path success: {:?}", path)
980                                         },
981                                         TestResult::ProbeFailure { path } => {
982                                                 assert_eq!(actual_path, &path);
983                                         },
984                                         TestResult::ProbeSuccess { path } => {
985                                                 panic!("Unexpected probe success: {:?}", path)
986                                         }
987                                 }
988                         }
989                 }
990                 fn probe_successful(&mut self, actual_path: &Path) {
991                         if let Some(expectations) = &mut self.event_expectations {
992                                 match expectations.pop_front().unwrap() {
993                                         TestResult::PaymentFailure { path, .. } => {
994                                                 panic!("Unexpected payment path failure: {:?}", path)
995                                         },
996                                         TestResult::PaymentSuccess { path } => {
997                                                 panic!("Unexpected payment path success: {:?}", path)
998                                         },
999                                         TestResult::ProbeFailure { path } => {
1000                                                 panic!("Unexpected probe failure: {:?}", path)
1001                                         },
1002                                         TestResult::ProbeSuccess { path } => {
1003                                                 assert_eq!(actual_path, &path);
1004                                         }
1005                                 }
1006                         }
1007                 }
1008         }
1009
1010         impl Drop for TestScorer {
1011                 fn drop(&mut self) {
1012                         if std::thread::panicking() {
1013                                 return;
1014                         }
1015
1016                         if let Some(event_expectations) = &self.event_expectations {
1017                                 if !event_expectations.is_empty() {
1018                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1019                                 }
1020                         }
1021                 }
1022         }
1023
1024         fn get_full_filepath(filepath: String, filename: String) -> String {
1025                 let mut path = PathBuf::from(filepath);
1026                 path.push(filename);
1027                 path.to_str().unwrap().to_string()
1028         }
1029
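	// Builds `num_nodes` test nodes, each with its own chain monitor, ChannelManager, gossip syncs,
	// peer manager, filesystem persister and scorer, and connects every pair of nodes as peers.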
1030         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1031                 let network = Network::Testnet;
1032                 let mut nodes = Vec::new();
1033                 for i in 0..num_nodes {
1034                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1035                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1036                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1037                         let genesis_block = genesis_block(network);
1038                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1039                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1040                         let seed = [i as u8; 32];
1041                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1042                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1043                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1044                         let now = Duration::from_secs(genesis_block.header.time as u64);
1045                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1046                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1047                         let best_block = BestBlock::from_network(network);
1048                         let params = ChainParameters { network, best_block };
1049                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1050                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1051                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1052                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1053                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1054                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1055                         nodes.push(node);
1056                 }
1057
1058                 for i in 0..num_nodes {
1059                         for j in (i+1)..num_nodes {
1060                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1061                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1062                         }
1063                 }
1064
1065                 nodes
1066         }
1067
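	// Opens a channel from $node_a to $node_b for $channel_value sats, driving the funding flow far
	// enough that both nodes see a ChannelPending event, and returns the funding transaction.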
1068         macro_rules! open_channel {
1069                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1070                         begin_open_channel!($node_a, $node_b, $channel_value);
1071                         let events = $node_a.node.get_and_clear_pending_events();
1072                         assert_eq!(events.len(), 1);
1073                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1074                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1075                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1076                         get_event!($node_b, Event::ChannelPending);
1077                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1078                         get_event!($node_a, Event::ChannelPending);
1079                         tx
1080                 }}
1081         }
1082
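	// Performs only the initial open_channel/accept_channel handshake; funding generation is left to
	// the caller (or to a background event handler).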
1083         macro_rules! begin_open_channel {
1084                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1085                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1086                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1087                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1088                 }}
1089         }
1090
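	// Given a FundingGenerationReady event, builds a funding transaction paying the requested output
	// script and returns it along with the temporary channel id.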
1091         macro_rules! handle_funding_generation_ready {
1092                 ($event: expr, $channel_value: expr) => {{
1093                         match $event {
1094                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1095                                         assert_eq!(channel_value_satoshis, $channel_value);
1096                                         assert_eq!(user_channel_id, 42);
1097
1098                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1099                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1100                                         }]};
1101                                         (temporary_channel_id, tx)
1102                                 },
1103                                 _ => panic!("Unexpected event"),
1104                         }
1105                 }}
1106         }
1107
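	// Connects `depth` blocks on top of the node's current best block, confirming `tx` in the first
	// block and notifying both the ChannelManager and the ChainMonitor of the final best block.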
1108         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1109                 for i in 1..=depth {
1110                         let prev_blockhash = node.best_block.block_hash();
1111                         let height = node.best_block.height() + 1;
1112                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1113                         let txdata = vec![(0, tx)];
1114                         node.best_block = BestBlock::new(header.block_hash(), height);
1115                         match i {
1116                                 1 => {
1117                                         node.node.transactions_confirmed(&header, &txdata, height);
1118                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1119                                 },
1120                                 x if x == depth => {
1121                                         node.node.best_block_updated(&header, height);
1122                                         node.chain_monitor.best_block_updated(&header, height);
1123                                 },
1124                                 _ => {},
1125                         }
1126                 }
1127         }
1128         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1129                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1130         }
1131
1132         #[test]
1133         fn test_background_processor() {
1134		// Test that when a new channel is created, the ChannelManager is persisted along with its
1135		// updates, and that whenever further updates become available the manager signals that it
1136		// needs re-persistence and is successfully re-persisted.
1137                 let nodes = create_nodes(2, "test_background_processor".to_string());
1138
1139                 // Go through the channel creation process so that each node has something to persist. Since
1140                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1141                 // avoid a race with processing events.
1142                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1143
1144                 // Initiate the background processors to watch each node.
1145                 let data_dir = nodes[0].persister.get_data_dir();
1146                 let persister = Arc::new(Persister::new(data_dir));
1147                 let event_handler = |_: _| {};
1148                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1149
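		// Poll the file at $filepath until its contents match a fresh serialization of $node, since
		// the background processor persists on its own schedule.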
1150                 macro_rules! check_persisted_data {
1151                         ($node: expr, $filepath: expr) => {
1152                                 let mut expected_bytes = Vec::new();
1153                                 loop {
1154                                         expected_bytes.clear();
1155                                         match $node.write(&mut expected_bytes) {
1156                                                 Ok(()) => {
1157                                                         match std::fs::read($filepath) {
1158                                                                 Ok(bytes) => {
1159                                                                         if bytes == expected_bytes {
1160                                                                                 break
1161                                                                         } else {
1162                                                                                 continue
1163                                                                         }
1164                                                                 },
1165                                                                 Err(_) => continue
1166                                                         }
1167                                                 },
1168                                                 Err(e) => panic!("Unexpected error: {}", e)
1169                                         }
1170                                 }
1171                         }
1172                 }
1173
1174                 // Check that the initial channel manager data is persisted as expected.
1175                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1176                 check_persisted_data!(nodes[0].node, filepath.clone());
1177
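		// Wait until the manager no longer signals that it needs to be persisted.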
1178                 loop {
1179                         if !nodes[0].node.get_persistence_condvar_value() { break }
1180                 }
1181
1182                 // Force-close the channel.
1183                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1184
1185                 // Check that the force-close updates are persisted.
1186                 check_persisted_data!(nodes[0].node, filepath.clone());
1187                 loop {
1188                         if !nodes[0].node.get_persistence_condvar_value() { break }
1189                 }
1190
1191                 // Check network graph is persisted
1192                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1193                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1194
1195                 // Check scorer is persisted
1196                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1197                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1198
1199                 if !std::thread::panicking() {
1200                         bg_processor.stop().unwrap();
1201                 }
1202         }
1203
1204         #[test]
1205         fn test_timer_tick_called() {
1206                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1207                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1208                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1209                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1210                 let data_dir = nodes[0].persister.get_data_dir();
1211                 let persister = Arc::new(Persister::new(data_dir));
1212                 let event_handler = |_: _| {};
1213                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
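		// Spin until each of the expected log entries appears, indicating that all three timers have
		// fired at least once.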
1214                 loop {
1215                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1216                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1217                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1218                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1219                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1220                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1221                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1222                                 break
1223                         }
1224                 }
1225
1226                 if !std::thread::panicking() {
1227                         bg_processor.stop().unwrap();
1228                 }
1229         }
1230
1231         #[test]
1232         fn test_channel_manager_persist_error() {
1233		// Test that if we encounter an error during manager persistence, the error is returned when the background processor is joined.
1234                 let nodes = create_nodes(2, "test_persist_error".to_string());
1235                 open_channel!(nodes[0], nodes[1], 100000);
1236
1237                 let data_dir = nodes[0].persister.get_data_dir();
1238                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1239                 let event_handler = |_: _| {};
1240                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1241                 match bg_processor.join() {
1242                         Ok(_) => panic!("Expected error persisting manager"),
1243                         Err(e) => {
1244                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1245                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1246                         },
1247                 }
1248         }
1249
1250         #[tokio::test]
1251         #[cfg(feature = "futures")]
1252         async fn test_channel_manager_persist_error_async() {
1253		// Test that if we encounter an error during manager persistence, the async processing future resolves to that error.
1254		let nodes = create_nodes(2, "test_persist_error_async".to_string());
1255                 open_channel!(nodes[0], nodes[1], 100000);
1256
1257                 let data_dir = nodes[0].persister.get_data_dir();
1258                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1259
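		// Drive the async background processor with a sleeper that never requests an exit; the future
		// should instead resolve with the manager persistence error.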
1260                 let bp_future = super::process_events_async(
1261                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1262                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1263                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1264                                 Box::pin(async move {
1265                                         tokio::time::sleep(dur).await;
1266                                         false // Never exit
1267                                 })
1268                         }, false,
1269                 );
1270                 match bp_future.await {
1271                         Ok(_) => panic!("Expected error persisting manager"),
1272                         Err(e) => {
1273                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1274                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1275                         },
1276                 }
1277         }
1278
1279         #[test]
1280         fn test_network_graph_persist_error() {
1281                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1282                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1283                 let data_dir = nodes[0].persister.get_data_dir();
1284                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1285                 let event_handler = |_: _| {};
1286                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1287
1288                 match bg_processor.stop() {
1289                         Ok(_) => panic!("Expected error persisting network graph"),
1290                         Err(e) => {
1291                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1292                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1293                         },
1294                 }
1295         }
1296
1297         #[test]
1298         fn test_scorer_persist_error() {
1299                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1300                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1301                 let data_dir = nodes[0].persister.get_data_dir();
1302                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1303                 let event_handler = |_: _| {};
1304		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1305
1306                 match bg_processor.stop() {
1307                         Ok(_) => panic!("Expected error persisting scorer"),
1308                         Err(e) => {
1309                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1310                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1311                         },
1312                 }
1313         }
1314
1315         #[test]
1316         fn test_background_event_handling() {
1317                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1318                 let channel_value = 100000;
1319                 let data_dir = nodes[0].persister.get_data_dir();
1320                 let persister = Arc::new(Persister::new(data_dir.clone()));
1321
1322		// Set up a background event handler for FundingGenerationReady and ChannelPending events.
1323                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1324                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1325                 let event_handler = move |event: Event| match event {
1326                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1327                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1328                         Event::ChannelReady { .. } => {},
1329                         _ => panic!("Unexpected event: {:?}", event),
1330                 };
1331
1332                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1333
1334                 // Open a channel and check that the FundingGenerationReady event was handled.
1335                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1336                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1337                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1338                         .expect("FundingGenerationReady not handled within deadline");
1339                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1340                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1341                 get_event!(nodes[1], Event::ChannelPending);
1342                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1343                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1344                         .expect("ChannelPending not handled within deadline");
1345
1346                 // Confirm the funding transaction.
1347                 confirm_transaction(&mut nodes[0], &funding_tx);
1348                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1349                 confirm_transaction(&mut nodes[1], &funding_tx);
1350                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1351                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1352                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1353                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1354                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1355
1356                 if !std::thread::panicking() {
1357                         bg_processor.stop().unwrap();
1358                 }
1359
1360                 // Set up a background event handler for SpendableOutputs events.
1361                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1362                 let event_handler = move |event: Event| match event {
1363                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1364                         Event::ChannelReady { .. } => {},
1365                         Event::ChannelClosed { .. } => {},
1366                         _ => panic!("Unexpected event: {:?}", event),
1367                 };
1368                 let persister = Arc::new(Persister::new(data_dir));
1369                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1370
1371                 // Force close the channel and check that the SpendableOutputs event was handled.
1372                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1373                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1374                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1375
1376                 let event = receiver
1377                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1378                         .expect("Events not handled within deadline");
1379                 match event {
1380                         Event::SpendableOutputs { .. } => {},
1381                         _ => panic!("Unexpected event: {:?}", event),
1382                 }
1383
1384                 if !std::thread::panicking() {
1385                         bg_processor.stop().unwrap();
1386                 }
1387         }
1388
1389         #[test]
1390         fn test_scorer_persistence() {
1391                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1392                 let data_dir = nodes[0].persister.get_data_dir();
1393                 let persister = Arc::new(Persister::new(data_dir));
1394                 let event_handler = |_: _| {};
1395                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1396
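		// Wait for the background processor to log that it has persisted the scorer.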
1397                 loop {
1398                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1399                         let expected_log = "Persisting scorer".to_string();
1400                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1401                                 break
1402                         }
1403                 }
1404
1405                 if !std::thread::panicking() {
1406                         bg_processor.stop().unwrap();
1407                 }
1408         }
1409
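	// Adds a stale channel from a partial announcement, waits for the background processor to loop a
	// couple of times without pruning it, then applies a rapid gossip sync update and expects all
	// channels to be pruned only once the graph sync has completed.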
1410         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1411                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1412                         let features = ChannelFeatures::empty();
1413                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1414                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1415                         ).expect("Failed to update channel from partial announcement");
1416                         let original_graph_description = $nodes[0].network_graph.to_string();
1417                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1418                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1419
1420                         loop {
1421                                 $sleep;
1422                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1423                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1424                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1425                                         .unwrap_or(&0) > 1
1426                                 {
1427                                         // Wait until the loop has gone around at least twice.
1428                                         break
1429                                 }
1430                         }
1431
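			// Raw rapid gossip sync data announcing two channels; applying it below completes the
			// graph sync.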
1432                         let initialization_input = vec![
1433                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1434                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1435                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1436                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1437                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1438                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1439                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1440                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1441                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1442                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1443                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1444                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1445                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1446                         ];
1447                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1448
1449                         // this should have added two channels and pruned the previous one.
1450                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1451
1452                         $receive.expect("Network graph not pruned within deadline");
1453
1454                         // all channels should now be pruned
1455                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1456                 }
1457         }
1458
1459         #[test]
1460         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1461                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1462
1463                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1464                 let data_dir = nodes[0].persister.get_data_dir();
1465                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1466
1467                 let event_handler = |_: _| {};
1468                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1469
1470                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1471                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1472                         std::thread::sleep(Duration::from_millis(1)));
1473
1474                 background_processor.stop().unwrap();
1475         }
1476
1477         #[tokio::test]
1478         #[cfg(feature = "futures")]
1479         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1480                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1481
1482                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
1483                 let data_dir = nodes[0].persister.get_data_dir();
1484                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1485
1486                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
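		// Drive the async background processor with a sleeper that exits once exit_sender fires.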
1487                 let bp_future = super::process_events_async(
1488                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1489                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1490                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1491                                 let mut exit_receiver = exit_receiver.clone();
1492                                 Box::pin(async move {
1493                                         tokio::select! {
1494                                                 _ = tokio::time::sleep(dur) => false,
1495                                                 _ = exit_receiver.changed() => true,
1496                                         }
1497                                 })
1498                         }, false,
1499                 );
1500
1501                 let t1 = tokio::spawn(bp_future);
1502                 let t2 = tokio::spawn(async move {
1503                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1504                                 let mut i = 0;
1505                                 loop {
1506                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1507                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1508                                         assert!(i < 5);
1509                                         i += 1;
1510                                 }
1511                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1512                         exit_sender.send(()).unwrap();
1513                 });
1514                 let (r1, r2) = tokio::join!(t1, t2);
1515                 r1.unwrap().unwrap();
1516                 r2.unwrap()
1517         }
1518
1519         macro_rules! do_test_payment_path_scoring {
1520                 ($nodes: expr, $receive: expr) => {
1521			// Ensure that we update the scorer when relevant events are processed. Here we check that
1522			// the scorer is updated upon a payment path failing or succeeding, and upon a probe
1523			// succeeding or failing (note that the channel must be public or else we won't score it).
1524			// The events are pushed into the ChannelManager directly and picked up by the event
1525			// handler hooked up to the running background processor.
1526                         let scored_scid = 4242;
1527                         let secp_ctx = Secp256k1::new();
1528                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1529                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1530
1531                         let path = Path { hops: vec![RouteHop {
1532                                 pubkey: node_1_id,
1533                                 node_features: NodeFeatures::empty(),
1534                                 short_channel_id: scored_scid,
1535                                 channel_features: ChannelFeatures::empty(),
1536                                 fee_msat: 0,
1537                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1538                         }], blinded_tail: None };
1539
1540                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1541                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1542                                 payment_id: None,
1543                                 payment_hash: PaymentHash([42; 32]),
1544                                 payment_failed_permanently: false,
1545                                 failure: PathFailure::OnPath { network_update: None },
1546                                 path: path.clone(),
1547                                 short_channel_id: Some(scored_scid),
1548                         });
1549                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1550                         match event {
1551                                 Event::PaymentPathFailed { .. } => {},
1552                                 _ => panic!("Unexpected event"),
1553                         }
1554
1555                         // Ensure we'll score payments that were explicitly failed back by the destination as
1556                         // ProbeSuccess.
1557                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1558                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1559                                 payment_id: None,
1560                                 payment_hash: PaymentHash([42; 32]),
1561                                 payment_failed_permanently: true,
1562                                 failure: PathFailure::OnPath { network_update: None },
1563                                 path: path.clone(),
1564                                 short_channel_id: None,
1565                         });
1566                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1567                         match event {
1568                                 Event::PaymentPathFailed { .. } => {},
1569                                 _ => panic!("Unexpected event"),
1570                         }
1571
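			// A successful payment path should be reported to the scorer as a payment success.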
1572                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1573                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1574                                 payment_id: PaymentId([42; 32]),
1575                                 payment_hash: None,
1576                                 path: path.clone(),
1577                         });
1578                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1579                         match event {
1580                                 Event::PaymentPathSuccessful { .. } => {},
1581                                 _ => panic!("Unexpected event"),
1582                         }
1583
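			// A successful probe should be reported to the scorer as a probe success.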
1584                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1585                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1586                                 payment_id: PaymentId([42; 32]),
1587                                 payment_hash: PaymentHash([42; 32]),
1588                                 path: path.clone(),
1589                         });
1590                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1591                         match event {
1592				Event::ProbeSuccessful { .. } => {},
1593                                 _ => panic!("Unexpected event"),
1594                         }
1595
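			// A failed probe should be reported to the scorer as a probe failure.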
1596                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1597                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1598                                 payment_id: PaymentId([42; 32]),
1599                                 payment_hash: PaymentHash([42; 32]),
1600                                 path,
1601                                 short_channel_id: Some(scored_scid),
1602                         });
1603                         let event = $receive.expect("ProbeFailure not handled within deadline");
1604                         match event {
1605                                 Event::ProbeFailed { .. } => {},
1606                                 _ => panic!("Unexpected event"),
1607                         }
1608                 }
1609         }
1610
1611         #[test]
1612         fn test_payment_path_scoring() {
1613                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1614                 let event_handler = move |event: Event| match event {
1615                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1616                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1617                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1618                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1619                         _ => panic!("Unexpected event: {:?}", event),
1620                 };
1621
1622                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1623                 let data_dir = nodes[0].persister.get_data_dir();
1624                 let persister = Arc::new(Persister::new(data_dir));
1625                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1626
1627                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1628
1629                 if !std::thread::panicking() {
1630                         bg_processor.stop().unwrap();
1631                 }
1632         }
1633
1634         #[tokio::test]
1635         #[cfg(feature = "futures")]
1636         async fn test_payment_path_scoring_async() {
1637                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1638                 let event_handler = move |event: Event| {
1639                         let sender_ref = sender.clone();
1640                         async move {
1641                                 match event {
1642                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1643                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1644                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1645                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1646                                         _ => panic!("Unexpected event: {:?}", event),
1647                                 }
1648                         }
1649                 };
1650
1651                 let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
1652                 let data_dir = nodes[0].persister.get_data_dir();
1653                 let persister = Arc::new(Persister::new(data_dir));
1654
1655                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1656
1657                 let bp_future = super::process_events_async(
1658                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1659                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1660                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1661                                 let mut exit_receiver = exit_receiver.clone();
1662                                 Box::pin(async move {
1663                                         tokio::select! {
1664                                                 _ = tokio::time::sleep(dur) => false,
1665                                                 _ = exit_receiver.changed() => true,
1666                                         }
1667                                 })
1668                         }, false,
1669                 );
1670                 let t1 = tokio::spawn(bp_future);
1671                 let t2 = tokio::spawn(async move {
1672                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1673                         exit_sender.send(()).unwrap();
1674                 });
1675
1676                 let (r1, r2) = tokio::join!(t1, t2);
1677                 r1.unwrap().unwrap();
1678                 r2.unwrap()
1679         }
1680 }