rust-lightning / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
68 ///   at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon, as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
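// Illustrative usage sketch (not part of this file; the `persister`, `event_handler`,
// `chain_monitor`, `channel_manager`, `gossip_sync`, `peer_manager`, `logger` and `scorer`
// values are assumed to already exist in the calling application):
//
//   let background_processor = BackgroundProcessor::start(
//       persister, event_handler, chain_monitor, channel_manager,
//       gossip_sync, peer_manager, logger, Some(scorer),
//   );
//   // ... run the node ...
//   background_processor.stop().expect("failed to persist the ChannelManager on shutdown");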
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
99 /// Signature operations take a lot longer without compiler optimisations.
100 /// Increasing the ping timer allows for this, but slower devices will still be disconnected if the
101 /// timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(feature = "futures")]
120 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
121 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
122 #[cfg(feature = "futures")]
123 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
124         min_u64(SCORER_PERSIST_TIMER, FIRST_NETWORK_PRUNE_TIMER));
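// With the release, non-test values above this works out to min(min(60, 10), min(30, 60)) = 10,
// i.e. on non-mobile platforms the async variant below sleeps at most 10 seconds per iteration,
// so no timer is missed by more than one interval.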
125
126 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
127 pub enum GossipSync<
128         P: Deref<Target = P2PGossipSync<G, U, L>>,
129         R: Deref<Target = RapidGossipSync<G, L>>,
130         G: Deref<Target = NetworkGraph<L>>,
131         U: Deref,
132         L: Deref,
133 >
134 where U::Target: UtxoLookup, L::Target: Logger {
135         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
136         P2P(P),
137         /// Rapid gossip sync from a trusted server.
138         Rapid(R),
139         /// No gossip sync.
140         None,
141 }
142
143 impl<
144         P: Deref<Target = P2PGossipSync<G, U, L>>,
145         R: Deref<Target = RapidGossipSync<G, L>>,
146         G: Deref<Target = NetworkGraph<L>>,
147         U: Deref,
148         L: Deref,
149 > GossipSync<P, R, G, U, L>
150 where U::Target: UtxoLookup, L::Target: Logger {
151         fn network_graph(&self) -> Option<&G> {
152                 match self {
153                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
154                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
155                         GossipSync::None => None,
156                 }
157         }
158
159         fn prunable_network_graph(&self) -> Option<&G> {
160                 match self {
161                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
162                         GossipSync::Rapid(gossip_sync) => {
163                                 if gossip_sync.is_initial_sync_complete() {
164                                         Some(gossip_sync.network_graph())
165                                 } else {
166                                         None
167                                 }
168                         },
169                         GossipSync::None => None,
170                 }
171         }
172 }
173
174 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
175 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
176         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
177 where
178         U::Target: UtxoLookup,
179         L::Target: Logger,
180 {
181         /// Initializes a new [`GossipSync::P2P`] variant.
182         pub fn p2p(gossip_sync: P) -> Self {
183                 GossipSync::P2P(gossip_sync)
184         }
185 }
186
187 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
188 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
189         GossipSync<
190                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
191                 R,
192                 G,
193                 &'a (dyn UtxoLookup + Send + Sync),
194                 L,
195         >
196 where
197         L::Target: Logger,
198 {
199         /// Initializes a new [`GossipSync::Rapid`] variant.
200         pub fn rapid(gossip_sync: R) -> Self {
201                 GossipSync::Rapid(gossip_sync)
202         }
203 }
204
205 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
206 impl<'a, L: Deref>
207         GossipSync<
208                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
209                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
210                 &'a NetworkGraph<L>,
211                 &'a (dyn UtxoLookup + Send + Sync),
212                 L,
213         >
214 where
215         L::Target: Logger,
216 {
217         /// Initializes a new [`GossipSync::None`] variant.
218         pub fn none() -> Self {
219                 GossipSync::None
220         }
221 }
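// Illustrative sketch (the `p2p_sync` and `rapid_sync` values are assumptions, not part of
// this file): construct whichever variant matches the gossip source actually in use and hand
// it to `BackgroundProcessor::start` or `process_events_async`:
//
//   let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_sync));
//   // or: let gossip_sync = GossipSync::rapid(Arc::clone(&rapid_sync));
//   // or, when gossip is handled entirely outside the background processor:
//   // let gossip_sync = GossipSync::none();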
222
223 fn handle_network_graph_update<L: Deref>(
224         network_graph: &NetworkGraph<L>, event: &Event
225 ) where L::Target: Logger {
226         if let Event::PaymentPathFailed {
227                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
228         {
229                 network_graph.handle_network_update(upd);
230         }
231 }
232
233 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
234         scorer: &'a S, event: &Event
235 ) {
236         let mut score = scorer.lock();
237         match event {
238                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
239                         let path = path.iter().collect::<Vec<_>>();
240                         score.payment_path_failed(&path, *scid);
241                 },
242                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
243                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
244                         // because the payment made it all the way to the destination with sufficient liquidity.
245                         let path = path.iter().collect::<Vec<_>>();
246                         score.probe_successful(&path);
247                 },
248                 Event::PaymentPathSuccessful { path, .. } => {
249                         let path = path.iter().collect::<Vec<_>>();
250                         score.payment_path_successful(&path);
251                 },
252                 Event::ProbeSuccessful { path, .. } => {
253                         let path = path.iter().collect::<Vec<_>>();
254                         score.probe_successful(&path);
255                 },
256                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
257                         let path = path.iter().collect::<Vec<_>>();
258                         score.probe_failed(&path, *scid);
259                 },
260                 _ => {},
261         }
262 }
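// For the scorer updates above to run, the application passes its scorer to the background
// processor. A minimal sketch, assuming a `Mutex`-wrapped scorer value `my_scorer` that
// implements [`WriteableScore`] (as the tests below do with `Mutex<TestScorer>`):
//
//   let scorer = Arc::new(Mutex::new(my_scorer));
//   // ... later: BackgroundProcessor::start(..., Some(Arc::clone(&scorer)));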
263
264 macro_rules! define_run_body {
265         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
266          $channel_manager: ident, $process_channel_manager_events: expr,
267          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
268          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
269          $check_slow_await: expr)
270         => { {
271                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
272                 $channel_manager.timer_tick_occurred();
273
274                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
275                 let mut last_ping_call = $get_timer(PING_TIMER);
276                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
277                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
278                 let mut have_pruned = false;
279
280                 loop {
281                         $process_channel_manager_events;
282                         $process_chain_monitor_events;
283
284                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
285                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
286                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
287                         // without running the normal event processing above and handing events to users.
288                         //
289                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
290                         // processing a message effectively at any point during this loop. In order to
291                         // minimize the time between such processing completing and persisting the updated
292                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
293                         // generally, and as a fallback place such blocking only immediately before
294                         // persistence.
295                         $peer_manager.process_events();
296
297                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
298                         // see `await_start`'s use below.
299                         let mut await_start = None;
300                         if $check_slow_await { await_start = Some($get_timer(1)); }
301                         let updates_available = $await;
302                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
303
304                         if updates_available {
305                                 log_trace!($logger, "Persisting ChannelManager...");
306                                 $persister.persist_manager(&*$channel_manager)?;
307                                 log_trace!($logger, "Done persisting ChannelManager.");
308                         }
309                         // Exit the loop if the background processor was requested to stop.
310                         if $loop_exit_check {
311                                 log_trace!($logger, "Terminating background processor.");
312                                 break;
313                         }
314                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
315                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
316                                 $channel_manager.timer_tick_occurred();
317                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
318                         }
319                         if await_slow {
320                                 // On various platforms, we may be starved of CPU cycles for several reasons.
321                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
322                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
323                                 // may not get any cycles.
324                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
325                                 // full second, at which point we assume sockets may have been killed (they
326                                 // appear to be at least on some platforms, even if it has only been a second).
327                                 // Note that we have to take care to not get here just because user event
328                                 // processing was slow at the top of the loop. For example, the sample client
329                                 // may call Bitcoin Core RPCs during event handling, which very often takes
330                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
331                                 // peers.
332                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
333                                 $peer_manager.disconnect_all_peers();
334                                 last_ping_call = $get_timer(PING_TIMER);
335                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
336                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
337                                 $peer_manager.timer_tick_occurred();
338                                 last_ping_call = $get_timer(PING_TIMER);
339                         }
340
341                         // Note that we want to run a graph prune once not long after startup before
342                         // falling back to our usual hourly prunes. This avoids short-lived clients never
343                         // pruning their network graph. We run once 60 seconds after startup before
344                         // continuing our normal cadence.
345                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
346                         if $timer_elapsed(&mut last_prune_call, prune_timer) {
347                                 // The network graph must not be pruned while rapid sync completion is pending
348                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
349                                         #[cfg(feature = "std")] {
350                                                 log_trace!($logger, "Pruning and persisting network graph.");
351                                                 network_graph.remove_stale_channels_and_tracking();
352                                         }
353                                         #[cfg(not(feature = "std"))] {
354                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
355                                                 log_trace!($logger, "Persisting network graph.");
356                                         }
357
358                                         if let Err(e) = $persister.persist_graph(network_graph) {
359                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
360                                         }
361
362                                         have_pruned = true;
363                                 }
364                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
365                                 last_prune_call = $get_timer(prune_timer);
366                         }
367
368                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
369                                 if let Some(ref scorer) = $scorer {
370                                         log_trace!($logger, "Persisting scorer");
371                                         if let Err(e) = $persister.persist_scorer(&scorer) {
372                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
373                                         }
374                                 }
375                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
376                         }
377                 }
378
379                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
380                 // some races where users quit while channel updates were in-flight, with
381                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
382                 $persister.persist_manager(&*$channel_manager)?;
383
384                 // Persist Scorer on exit
385                 if let Some(ref scorer) = $scorer {
386                         $persister.persist_scorer(&scorer)?;
387                 }
388
389                 // Persist NetworkGraph on exit
390                 if let Some(network_graph) = $gossip_sync.network_graph() {
391                         $persister.persist_graph(network_graph)?;
392                 }
393
394                 Ok(())
395         } }
396 }
397
398 #[cfg(feature = "futures")]
399 pub(crate) mod futures_util {
400         use core::future::Future;
401         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
402         use core::pin::Pin;
403         use core::marker::Unpin;
404         pub(crate) struct Selector<
405                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
406         > {
407                 pub a: A,
408                 pub b: B,
409                 pub c: C,
410         }
411         pub(crate) enum SelectorOutput {
412                 A, B, C(bool),
413         }
414
415         impl<
416                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
417         > Future for Selector<A, B, C> {
418                 type Output = SelectorOutput;
419                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
420                         match Pin::new(&mut self.a).poll(ctx) {
421                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
422                                 Poll::Pending => {},
423                         }
424                         match Pin::new(&mut self.b).poll(ctx) {
425                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
426                                 Poll::Pending => {},
427                         }
428                         match Pin::new(&mut self.c).poll(ctx) {
429                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
430                                 Poll::Pending => {},
431                         }
432                         Poll::Pending
433                 }
434         }
435
436         // If we want to poll a future without an async context to figure out if it has completed or
437         // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
438         // but sadly there's a good bit of boilerplate here.
439         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
440         fn dummy_waker_action(_: *const ()) { }
441
442         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
443                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
444         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
445 }
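// Sketch of the pattern `dummy_waker` enables (assuming some `Unpin` future `fut`; not part
// of this file): poll a future exactly once, outside any executor, purely to learn whether it
// has already completed. Because the waker is a no-op, nothing will ever re-wake the task.
//
//   let waker = dummy_waker();
//   let mut ctx = core::task::Context::from_waker(&waker);
//   let finished = match core::pin::Pin::new(&mut fut).poll(&mut ctx) {
//       core::task::Poll::Ready(_) => true,
//       core::task::Poll::Pending => false,
//   };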
446 #[cfg(feature = "futures")]
447 use futures_util::{Selector, SelectorOutput, dummy_waker};
448 #[cfg(feature = "futures")]
449 use core::task;
450
451 /// Processes background events in a future.
452 ///
453 /// `sleeper` should return a future which completes in the given amount of time and returns a
454 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
455 /// future which outputs true, the loop will exit and this function's future will complete.
456 ///
457 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
458 ///
459 /// Requires the `futures` feature. Note that while this function is available without the `std`
460 /// feature, using it without `std` will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
461 /// in that case you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
462 /// regularly yourself instead.
463 ///
464 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
465 /// mobile device, where we may need to check for interruption of the application regularly. If you
466 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
467 /// are hundreds or thousands of simultaneous process calls running.
468 #[cfg(feature = "futures")]
469 pub async fn process_events_async<
470         'a,
471         UL: 'static + Deref + Send + Sync,
472         CF: 'static + Deref + Send + Sync,
473         CW: 'static + Deref + Send + Sync,
474         T: 'static + Deref + Send + Sync,
475         ES: 'static + Deref + Send + Sync,
476         NS: 'static + Deref + Send + Sync,
477         SP: 'static + Deref + Send + Sync,
478         F: 'static + Deref + Send + Sync,
479         R: 'static + Deref + Send + Sync,
480         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
481         L: 'static + Deref + Send + Sync,
482         P: 'static + Deref + Send + Sync,
483         Descriptor: 'static + SocketDescriptor + Send + Sync,
484         CMH: 'static + Deref + Send + Sync,
485         RMH: 'static + Deref + Send + Sync,
486         OMH: 'static + Deref + Send + Sync,
487         EventHandlerFuture: core::future::Future<Output = ()>,
488         EventHandler: Fn(Event) -> EventHandlerFuture,
489         PS: 'static + Deref + Send,
490         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
491         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
492         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
493         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
494         UMH: 'static + Deref + Send + Sync,
495         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
496         S: 'static + Deref<Target = SC> + Send + Sync,
497         SC: for<'b> WriteableScore<'b>,
498         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
499         Sleeper: Fn(Duration) -> SleepFuture
500 >(
501         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
502         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
503         sleeper: Sleeper, mobile_interruptable_platform: bool,
504 ) -> Result<(), lightning::io::Error>
505 where
506         UL::Target: 'static + UtxoLookup,
507         CF::Target: 'static + chain::Filter,
508         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
509         T::Target: 'static + BroadcasterInterface,
510         ES::Target: 'static + EntropySource,
511         NS::Target: 'static + NodeSigner,
512         SP::Target: 'static + SignerProvider,
513         F::Target: 'static + FeeEstimator,
514         R::Target: 'static + Router,
515         L::Target: 'static + Logger,
516         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
517         CMH::Target: 'static + ChannelMessageHandler,
518         OMH::Target: 'static + OnionMessageHandler,
519         RMH::Target: 'static + RoutingMessageHandler,
520         UMH::Target: 'static + CustomMessageHandler,
521         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
522 {
523         let mut should_break = false;
524         let async_event_handler = |event| {
525                 let network_graph = gossip_sync.network_graph();
526                 let event_handler = &event_handler;
527                 let scorer = &scorer;
528                 async move {
529                         if let Some(network_graph) = network_graph {
530                                 handle_network_graph_update(network_graph, &event)
531                         }
532                         if let Some(ref scorer) = scorer {
533                                 update_scorer(scorer, &event);
534                         }
535                         event_handler(event).await;
536                 }
537         };
538         define_run_body!(persister,
539                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
540                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
541                 gossip_sync, peer_manager, logger, scorer, should_break, {
542                         let fut = Selector {
543                                 a: channel_manager.get_persistable_update_future(),
544                                 b: chain_monitor.get_update_future(),
545                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
546                         };
547                         match fut.await {
548                                 SelectorOutput::A => true,
549                                 SelectorOutput::B => false,
550                                 SelectorOutput::C(exit) => {
551                                         should_break = exit;
552                                         false
553                                 }
554                         }
555                 }, |t| sleeper(Duration::from_secs(t)),
556                 |fut: &mut SleepFuture, _| {
557                         let mut waker = dummy_waker();
558                         let mut ctx = task::Context::from_waker(&mut waker);
559                         match core::pin::Pin::new(fut).poll(&mut ctx) {
560                                 task::Poll::Ready(exit) => { should_break = exit; true },
561                                 task::Poll::Pending => false,
562                         }
563                 }, mobile_interruptable_platform)
564 }
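// A possible way to drive `process_events_async` from a Tokio runtime (Tokio, the
// `stop_receiver` watch channel and the async `handle_event` function are assumptions of
// this sketch, not requirements of the API). The sleeper resolves to `true` once shutdown
// is requested, which makes the loop above exit and the returned future complete:
//
//   let sleeper = move |d: Duration| {
//       let mut stop = stop_receiver.clone();
//       Box::pin(async move {
//           tokio::select! {
//               _ = tokio::time::sleep(d) => false,
//               _ = stop.changed() => true,
//           }
//       })
//   };
//   process_events_async(
//       persister, |event| handle_event(event), chain_monitor, channel_manager,
//       gossip_sync, peer_manager, logger, Some(scorer), sleeper,
//       mobile_interruptable_platform,
//   ).await?;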
565
566 #[cfg(feature = "std")]
567 impl BackgroundProcessor {
568         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
569         /// documentation].
570         ///
571         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
572         /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
573         /// either [`join`] or [`stop`].
574         ///
575         /// # Data Persistence
576         ///
577         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
578         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
579         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
580         /// provided implementation.
581         ///
582         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
583         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
584         /// See the `lightning-persister` crate for LDK's provided implementation.
585         ///
586         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
587         /// error or call [`join`] and handle any error that may arise. For the latter case,
588         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
589         ///
590         /// # Event Handling
591         ///
592         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
593         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
594         /// functionality implemented by other handlers.
595         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
596         ///
597         /// # Rapid Gossip Sync
598         ///
599         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
600         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
601         /// until the [`RapidGossipSync`] instance completes its first sync.
602         ///
603         /// [top-level documentation]: BackgroundProcessor
604         /// [`join`]: Self::join
605         /// [`stop`]: Self::stop
606         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
607         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
608         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
609         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
610         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
611         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
612         pub fn start<
613                 'a,
614                 UL: 'static + Deref + Send + Sync,
615                 CF: 'static + Deref + Send + Sync,
616                 CW: 'static + Deref + Send + Sync,
617                 T: 'static + Deref + Send + Sync,
618                 ES: 'static + Deref + Send + Sync,
619                 NS: 'static + Deref + Send + Sync,
620                 SP: 'static + Deref + Send + Sync,
621                 F: 'static + Deref + Send + Sync,
622                 R: 'static + Deref + Send + Sync,
623                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
624                 L: 'static + Deref + Send + Sync,
625                 P: 'static + Deref + Send + Sync,
626                 Descriptor: 'static + SocketDescriptor + Send + Sync,
627                 CMH: 'static + Deref + Send + Sync,
628                 OMH: 'static + Deref + Send + Sync,
629                 RMH: 'static + Deref + Send + Sync,
630                 EH: 'static + EventHandler + Send,
631                 PS: 'static + Deref + Send,
632                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
633                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
634                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
635                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
636                 UMH: 'static + Deref + Send + Sync,
637                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
638                 S: 'static + Deref<Target = SC> + Send + Sync,
639                 SC: for <'b> WriteableScore<'b>,
640         >(
641                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
642                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
643         ) -> Self
644         where
645                 UL::Target: 'static + UtxoLookup,
646                 CF::Target: 'static + chain::Filter,
647                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
648                 T::Target: 'static + BroadcasterInterface,
649                 ES::Target: 'static + EntropySource,
650                 NS::Target: 'static + NodeSigner,
651                 SP::Target: 'static + SignerProvider,
652                 F::Target: 'static + FeeEstimator,
653                 R::Target: 'static + Router,
654                 L::Target: 'static + Logger,
655                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
656                 CMH::Target: 'static + ChannelMessageHandler,
657                 OMH::Target: 'static + OnionMessageHandler,
658                 RMH::Target: 'static + RoutingMessageHandler,
659                 UMH::Target: 'static + CustomMessageHandler,
660                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
661         {
662                 let stop_thread = Arc::new(AtomicBool::new(false));
663                 let stop_thread_clone = stop_thread.clone();
664                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
665                         let event_handler = |event| {
666                                 let network_graph = gossip_sync.network_graph();
667                                 if let Some(network_graph) = network_graph {
668                                         handle_network_graph_update(network_graph, &event)
669                                 }
670                                 if let Some(ref scorer) = scorer {
671                                         update_scorer(scorer, &event);
672                                 }
673                                 event_handler.handle_event(event);
674                         };
675                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
676                                 channel_manager, channel_manager.process_pending_events(&event_handler),
677                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
678                                 Sleeper::from_two_futures(
679                                         channel_manager.get_persistable_update_future(),
680                                         chain_monitor.get_update_future()
681                                 ).wait_timeout(Duration::from_millis(100)),
682                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
683                 });
684                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
685         }
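        // One possible shape for the "call `join` and handle any error" pattern described in the
        // documentation above (the restart-on-error loop is an assumption of this sketch, not
        // something the API prescribes):
        //
        //   let mut processor = BackgroundProcessor::start(/* ...as above... */);
        //   while let Err(e) = processor.join() {
        //       // The background thread exited because a `Persister` call failed; handle the
        //       // error, then restart processing by calling `start` again.
        //       eprintln!("Background processing stopped on persistence error: {}", e);
        //       processor = BackgroundProcessor::start(/* ...as above... */);
        //   }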
686
687         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
688         /// [`ChannelManager`].
689         ///
690         /// # Panics
691         ///
692         /// This function panics if the background thread has panicked, such as while persisting or
693         /// handling events.
694         ///
695         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
696         pub fn join(mut self) -> Result<(), std::io::Error> {
697                 assert!(self.thread_handle.is_some());
698                 self.join_thread()
699         }
700
701         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
702         /// [`ChannelManager`].
703         ///
704         /// # Panics
705         ///
706         /// This function panics if the background thread has panicked, such as while persisting or
707         /// handling events.
708         ///
709         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
710         pub fn stop(mut self) -> Result<(), std::io::Error> {
711                 assert!(self.thread_handle.is_some());
712                 self.stop_and_join_thread()
713         }
714
715         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
716                 self.stop_thread.store(true, Ordering::Release);
717                 self.join_thread()
718         }
719
720         fn join_thread(&mut self) -> Result<(), std::io::Error> {
721                 match self.thread_handle.take() {
722                         Some(handle) => handle.join().unwrap(),
723                         None => Ok(()),
724                 }
725         }
726 }
727
728 #[cfg(feature = "std")]
729 impl Drop for BackgroundProcessor {
730         fn drop(&mut self) {
731                 self.stop_and_join_thread().unwrap();
732         }
733 }
734
735 #[cfg(all(feature = "std", test))]
736 mod tests {
737         use bitcoin::blockdata::block::BlockHeader;
738         use bitcoin::blockdata::constants::genesis_block;
739         use bitcoin::blockdata::locktime::PackedLockTime;
740         use bitcoin::blockdata::transaction::{Transaction, TxOut};
741         use bitcoin::network::constants::Network;
742         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
743         use lightning::chain::{BestBlock, Confirm, chainmonitor};
744         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
745         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
746         use lightning::chain::transaction::OutPoint;
747         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
748         use lightning::{get_event_msg, get_event};
749         use lightning::ln::PaymentHash;
750         use lightning::ln::channelmanager;
751         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
752         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
753         use lightning::ln::msgs::{ChannelMessageHandler, Init};
754         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
755         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
756         use lightning::routing::router::{DefaultRouter, RouteHop};
757         use lightning::routing::scoring::{ChannelUsage, Score};
758         use lightning::util::config::UserConfig;
759         use lightning::util::ser::Writeable;
760         use lightning::util::test_utils;
761         use lightning::util::persist::KVStorePersister;
762         use lightning_persister::FilesystemPersister;
763         use std::collections::VecDeque;
764         use std::fs;
765         use std::path::PathBuf;
766         use std::sync::{Arc, Mutex};
767         use std::sync::mpsc::SyncSender;
768         use std::time::Duration;
769         use bitcoin::hashes::Hash;
770         use bitcoin::TxMerkleNode;
771         use lightning_rapid_gossip_sync::RapidGossipSync;
772         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
773
774         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
775
776         #[derive(Clone, Hash, PartialEq, Eq)]
777         struct TestDescriptor{}
778         impl SocketDescriptor for TestDescriptor {
779                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
780                         0
781                 }
782
783                 fn disconnect_socket(&mut self) {}
784         }
785
786         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
787
788         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
789
790         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
791         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
792
793         struct Node {
794                 node: Arc<ChannelManager>,
795                 p2p_gossip_sync: PGS,
796                 rapid_gossip_sync: RGS,
797                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
798                 chain_monitor: Arc<ChainMonitor>,
799                 persister: Arc<FilesystemPersister>,
800                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
801                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
802                 logger: Arc<test_utils::TestLogger>,
803                 best_block: BestBlock,
804                 scorer: Arc<Mutex<TestScorer>>,
805         }
806
807         impl Node {
808                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
809                         GossipSync::P2P(self.p2p_gossip_sync.clone())
810                 }
811
812                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
813                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
814                 }
815
816                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
817                         GossipSync::None
818                 }
819         }
820
821         impl Drop for Node {
822                 fn drop(&mut self) {
823                         let data_dir = self.persister.get_data_dir();
824                         match fs::remove_dir_all(data_dir.clone()) {
825                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
826                                 _ => {}
827                         }
828                 }
829         }
830
831         struct Persister {
832                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
833                 graph_persistence_notifier: Option<SyncSender<()>>,
834                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
835                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
836                 filesystem_persister: FilesystemPersister,
837         }
838
839         impl Persister {
840                 fn new(data_dir: String) -> Self {
841                         let filesystem_persister = FilesystemPersister::new(data_dir);
842                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
843                 }
844
845                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
846                         Self { graph_error: Some((error, message)), ..self }
847                 }
848
849                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
850                         Self { graph_persistence_notifier: Some(sender), ..self }
851                 }
852
853                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
854                         Self { manager_error: Some((error, message)), ..self }
855                 }
856
857                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
858                         Self { scorer_error: Some((error, message)), ..self }
859                 }
860         }
861
862         impl KVStorePersister for Persister {
863                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
864                         if key == "manager" {
865                                 if let Some((error, message)) = self.manager_error {
866                                         return Err(std::io::Error::new(error, message))
867                                 }
868                         }
869
870                         if key == "network_graph" {
871                                 if let Some(sender) = &self.graph_persistence_notifier {
872                                         match sender.send(()) {
873                                                 Ok(()) => {},
874                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
875                                         }
876                                 };
877
878                                 if let Some((error, message)) = self.graph_error {
879                                         return Err(std::io::Error::new(error, message))
880                                 }
881                         }
882
883                         if key == "scorer" {
884                                 if let Some((error, message)) = self.scorer_error {
885                                         return Err(std::io::Error::new(error, message))
886                                 }
887                         }
888
889                         self.filesystem_persister.persist(key, object)
890                 }
891         }
892
893         struct TestScorer {
894                 event_expectations: Option<VecDeque<TestResult>>,
895         }
896
897         #[derive(Debug)]
898         enum TestResult {
899                 PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
900                 PaymentSuccess { path: Vec<RouteHop> },
901                 ProbeFailure { path: Vec<RouteHop> },
902                 ProbeSuccess { path: Vec<RouteHop> },
903         }
904
905         impl TestScorer {
906                 fn new() -> Self {
907                         Self { event_expectations: None }
908                 }
909
910                 fn expect(&mut self, expectation: TestResult) {
911                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
912                 }
913         }
914
915         impl lightning::util::ser::Writeable for TestScorer {
916                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
917         }
918
919         impl Score for TestScorer {
920                 fn channel_penalty_msat(
921                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
922                 ) -> u64 { unimplemented!(); }
923
924                 fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
925                         if let Some(expectations) = &mut self.event_expectations {
926                                 match expectations.pop_front().unwrap() {
927                                         TestResult::PaymentFailure { path, short_channel_id } => {
928                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
929                                                 assert_eq!(actual_short_channel_id, short_channel_id);
930                                         },
931                                         TestResult::PaymentSuccess { path } => {
932                                                 panic!("Unexpected successful payment path: {:?}", path)
933                                         },
934                                         TestResult::ProbeFailure { path } => {
935                                                 panic!("Unexpected probe failure: {:?}", path)
936                                         },
937                                         TestResult::ProbeSuccess { path } => {
938                                                 panic!("Unexpected probe success: {:?}", path)
939                                         }
940                                 }
941                         }
942                 }
943
944                 fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
945                         if let Some(expectations) = &mut self.event_expectations {
946                                 match expectations.pop_front().unwrap() {
947                                         TestResult::PaymentFailure { path, .. } => {
948                                                 panic!("Unexpected payment path failure: {:?}", path)
949                                         },
950                                         TestResult::PaymentSuccess { path } => {
951                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
952                                         },
953                                         TestResult::ProbeFailure { path } => {
954                                                 panic!("Unexpected probe failure: {:?}", path)
955                                         },
956                                         TestResult::ProbeSuccess { path } => {
957                                                 panic!("Unexpected probe success: {:?}", path)
958                                         }
959                                 }
960                         }
961                 }
962
963                 fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
964                         if let Some(expectations) = &mut self.event_expectations {
965                                 match expectations.pop_front().unwrap() {
966                                         TestResult::PaymentFailure { path, .. } => {
967                                                 panic!("Unexpected payment path failure: {:?}", path)
968                                         },
969                                         TestResult::PaymentSuccess { path } => {
970                                                 panic!("Unexpected payment path success: {:?}", path)
971                                         },
972                                         TestResult::ProbeFailure { path } => {
973                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
974                                         },
975                                         TestResult::ProbeSuccess { path } => {
976                                                 panic!("Unexpected probe success: {:?}", path)
977                                         }
978                                 }
979                         }
980                 }
981                 fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
982                         if let Some(expectations) = &mut self.event_expectations {
983                                 match expectations.pop_front().unwrap() {
984                                         TestResult::PaymentFailure { path, .. } => {
985                                                 panic!("Unexpected payment path failure: {:?}", path)
986                                         },
987                                         TestResult::PaymentSuccess { path } => {
988                                                 panic!("Unexpected payment path success: {:?}", path)
989                                         },
990                                         TestResult::ProbeFailure { path } => {
991                                                 panic!("Unexpected probe failure: {:?}", path)
992                                         },
993                                         TestResult::ProbeSuccess { path } => {
994                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
995                                         }
996                                 }
997                         }
998                 }
999         }
1000
1001         impl Drop for TestScorer {
1002                 fn drop(&mut self) {
1003                         if std::thread::panicking() {
1004                                 return;
1005                         }
1006
1007                         if let Some(event_expectations) = &self.event_expectations {
1008                                 if !event_expectations.is_empty() {
1009                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1010                                 }
1011                         }
1012                 }
1013         }
1014
1015         fn get_full_filepath(filepath: String, filename: String) -> String {
1016                 let mut path = PathBuf::from(filepath);
1017                 path.push(filename);
1018                 path.to_str().unwrap().to_string()
1019         }
1020
1021         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1022                 let mut nodes = Vec::new();
1023                 for i in 0..num_nodes {
1024                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
1025                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1026                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1027                         let network = Network::Testnet;
1028                         let genesis_block = genesis_block(network);
1029                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1030                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1031                         let seed = [i as u8; 32];
1032                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1033                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1034                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1035                         let now = Duration::from_secs(genesis_block.header.time as u64);
1036                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1037                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1038                         let best_block = BestBlock::from_network(network);
1039                         let params = ChainParameters { network, best_block };
1040                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1041                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1042                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1043                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1044                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1045                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1046                         nodes.push(node);
1047                 }
1048
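		// Connect each pair of nodes as peers so they can exchange channel and gossip messages.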
1049                 for i in 0..num_nodes {
1050                         for j in (i+1)..num_nodes {
1051                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1052                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1053                         }
1054                 }
1055
1056                 nodes
1057         }
1058
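	// Runs the full channel-open handshake between `$node_a` and `$node_b` through
	// `funding_signed`, returning the funding transaction (not yet confirmed on-chain).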
1059         macro_rules! open_channel {
1060                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1061                         begin_open_channel!($node_a, $node_b, $channel_value);
1062                         let events = $node_a.node.get_and_clear_pending_events();
1063                         assert_eq!(events.len(), 1);
1064                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1065                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1066                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1067                         get_event!($node_b, Event::ChannelPending);
1068                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1069                         get_event!($node_a, Event::ChannelPending);
1070                         tx
1071                 }}
1072         }
1073
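	// Performs only the `open_channel`/`accept_channel` exchange, leaving a
	// `FundingGenerationReady` event pending on `$node_a` for the test to handle.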
1074         macro_rules! begin_open_channel {
1075                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1076                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1077                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1078                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1079                 }}
1080         }
1081
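	// Extracts the temporary channel id from a `FundingGenerationReady` event and builds a dummy
	// funding transaction paying the requested output script.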
1082         macro_rules! handle_funding_generation_ready {
1083                 ($event: expr, $channel_value: expr) => {{
1084                         match $event {
1085                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1086                                         assert_eq!(channel_value_satoshis, $channel_value);
1087                                         assert_eq!(user_channel_id, 42);
1088
1089                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1090                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1091                                         }]};
1092                                         (temporary_channel_id, tx)
1093                                 },
1094                                 _ => panic!("Unexpected event"),
1095                         }
1096                 }}
1097         }
1098
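	// Connects `depth` synthetic blocks on top of the node's best block, confirming `tx` in the
	// first block and announcing the final block as the new tip to both the channel manager and
	// the chain monitor.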
1099         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1100                 for i in 1..=depth {
1101                         let prev_blockhash = node.best_block.block_hash();
1102                         let height = node.best_block.height() + 1;
1103                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1104                         let txdata = vec![(0, tx)];
1105                         node.best_block = BestBlock::new(header.block_hash(), height);
1106                         match i {
1107                                 1 => {
1108                                         node.node.transactions_confirmed(&header, &txdata, height);
1109                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1110                                 },
1111                                 x if x == depth => {
1112                                         node.node.best_block_updated(&header, height);
1113                                         node.chain_monitor.best_block_updated(&header, height);
1114                                 },
1115                                 _ => {},
1116                         }
1117                 }
1118         }
1119         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1120                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1121         }
1122
1123         #[test]
1124         fn test_background_processor() {
1125                 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
1126                 // updates. Also test that when new updates are available, the manager signals that it needs
1127                 // re-persistence and is successfully re-persisted.
1128                 let nodes = create_nodes(2, "test_background_processor".to_string());
1129
1130                 // Go through the channel creation process so that each node has something to persist. Since
1131                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1132                 // avoid a race with processing events.
1133                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1134
1135                 // Start a background processor to watch the first node.
1136                 let data_dir = nodes[0].persister.get_data_dir();
1137                 let persister = Arc::new(Persister::new(data_dir));
1138                 let event_handler = |_: _| {};
1139                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1140
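		// Busy-waits until the bytes persisted at `$filepath` match a fresh serialization of
		// `$node`, panicking if `$node` itself fails to serialize.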
1141                 macro_rules! check_persisted_data {
1142                         ($node: expr, $filepath: expr) => {
1143                                 let mut expected_bytes = Vec::new();
1144                                 loop {
1145                                         expected_bytes.clear();
1146                                         match $node.write(&mut expected_bytes) {
1147                                                 Ok(()) => {
1148                                                         match std::fs::read($filepath) {
1149                                                                 Ok(bytes) => {
1150                                                                         if bytes == expected_bytes {
1151                                                                                 break
1152                                                                         } else {
1153                                                                                 continue
1154                                                                         }
1155                                                                 },
1156                                                                 Err(_) => continue
1157                                                         }
1158                                                 },
1159                                                 Err(e) => panic!("Unexpected error: {}", e)
1160                                         }
1161                                 }
1162                         }
1163                 }
1164
1165                 // Check that the initial channel manager data is persisted as expected.
1166                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1167                 check_persisted_data!(nodes[0].node, filepath.clone());
1168
1169                 loop {
1170                         if !nodes[0].node.get_persistence_condvar_value() { break }
1171                 }
1172
1173                 // Force-close the channel.
1174                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1175
1176                 // Check that the force-close updates are persisted.
1177                 check_persisted_data!(nodes[0].node, filepath.clone());
1178                 loop {
1179                         if !nodes[0].node.get_persistence_condvar_value() { break }
1180                 }
1181
1182                 // Check network graph is persisted
1183                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1184                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1185
1186                 // Check scorer is persisted
1187                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1188                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1189
1190                 if !std::thread::panicking() {
1191                         bg_processor.stop().unwrap();
1192                 }
1193         }
1194
1195         #[test]
1196         fn test_timer_tick_called() {
1197                 // Test that ChannelManager's and PeerManager's `timer_tick_occurred` are called every
1198                 // `FRESHNESS_TIMER`.
1199                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1200                 let data_dir = nodes[0].persister.get_data_dir();
1201                 let persister = Arc::new(Persister::new(data_dir));
1202                 let event_handler = |_: _| {};
1203                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1204                 loop {
1205                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1206                         let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
1207                         let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
1208                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
1209                                         log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
1210                                 break
1211                         }
1212                 }
1213
1214                 if !std::thread::panicking() {
1215                         bg_processor.stop().unwrap();
1216                 }
1217         }
1218
1219         #[test]
1220         fn test_channel_manager_persist_error() {
1221                 // Test that if we encounter an error during manager persistence, the error is returned when
1222                 // joining the background processor.
1222                 let nodes = create_nodes(2, "test_persist_error".to_string());
1223                 open_channel!(nodes[0], nodes[1], 100000);
1224
1225                 let data_dir = nodes[0].persister.get_data_dir();
1226                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1227                 let event_handler = |_: _| {};
1228                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1229                 match bg_processor.join() {
1230                         Ok(_) => panic!("Expected error persisting manager"),
1231                         Err(e) => {
1232                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1233                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1234                         },
1235                 }
1236         }
1237
1238         #[tokio::test]
1239         #[cfg(feature = "futures")]
1240         async fn test_channel_manager_persist_error_async() {
1241                 // Test that if we encounter an error during manager persistence, the error is returned by the
1242                 // background processing future.
1242                 let nodes = create_nodes(2, "test_persist_error_async".to_string());
1243                 open_channel!(nodes[0], nodes[1], 100000);
1244
1245                 let data_dir = nodes[0].persister.get_data_dir();
1246                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1247
1248                 let bp_future = super::process_events_async(
1249                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1250                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1251                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1252                                 Box::pin(async move {
1253                                         tokio::time::sleep(dur).await;
1254                                         false // Never exit
1255                                 })
1256                         }, false,
1257                 );
1258                 match bp_future.await {
1259                         Ok(_) => panic!("Expected error persisting manager"),
1260                         Err(e) => {
1261                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1262                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1263                         },
1264                 }
1265         }
1266
1267         #[test]
1268         fn test_network_graph_persist_error() {
1269                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1270                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1271                 let data_dir = nodes[0].persister.get_data_dir();
1272                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1273                 let event_handler = |_: _| {};
1274                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1275
1276                 match bg_processor.stop() {
1277                         Ok(_) => panic!("Expected error persisting network graph"),
1278                         Err(e) => {
1279                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1280                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1281                         },
1282                 }
1283         }
1284
1285         #[test]
1286         fn test_scorer_persist_error() {
1287                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1288                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1289                 let data_dir = nodes[0].persister.get_data_dir();
1290                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1291                 let event_handler = |_: _| {};
1292                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1293
1294                 match bg_processor.stop() {
1295                         Ok(_) => panic!("Expected error persisting scorer"),
1296                         Err(e) => {
1297                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1298                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1299                         },
1300                 }
1301         }
1302
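	// Tests that events generated while the background processor is running (FundingGenerationReady,
	// ChannelPending, SpendableOutputs) are delivered to the user-provided event handler.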
1303         #[test]
1304         fn test_background_event_handling() {
1305                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1306                 let channel_value = 100000;
1307                 let data_dir = nodes[0].persister.get_data_dir();
1308                 let persister = Arc::new(Persister::new(data_dir.clone()));
1309
1310                 // Set up a background event handler for FundingGenerationReady and ChannelPending events.
1311                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1312                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1313                 let event_handler = move |event: Event| match event {
1314                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1315                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1316                         Event::ChannelReady { .. } => {},
1317                         _ => panic!("Unexpected event: {:?}", event),
1318                 };
1319
1320                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1321
1322                 // Open a channel and check that the FundingGenerationReady event was handled.
1323                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1324                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1325                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1326                         .expect("FundingGenerationReady not handled within deadline");
1327                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1328                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1329                 get_event!(nodes[1], Event::ChannelPending);
1330                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1331                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1332                         .expect("ChannelPending not handled within deadline");
1333
1334                 // Confirm the funding transaction.
1335                 confirm_transaction(&mut nodes[0], &funding_tx);
1336                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1337                 confirm_transaction(&mut nodes[1], &funding_tx);
1338                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1339                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1340                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1341                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1342                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1343
1344                 if !std::thread::panicking() {
1345                         bg_processor.stop().unwrap();
1346                 }
1347
1348                 // Set up a background event handler for SpendableOutputs events.
1349                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1350                 let event_handler = move |event: Event| match event {
1351                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1352                         Event::ChannelReady { .. } => {},
1353                         Event::ChannelClosed { .. } => {},
1354                         _ => panic!("Unexpected event: {:?}", event),
1355                 };
1356                 let persister = Arc::new(Persister::new(data_dir));
1357                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1358
1359                 // Force close the channel and check that the SpendableOutputs event was handled.
1360                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1361                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1362                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1363
1364                 let event = receiver
1365                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1366                         .expect("Events not handled within deadline");
1367                 match event {
1368                         Event::SpendableOutputs { .. } => {},
1369                         _ => panic!("Unexpected event: {:?}", event),
1370                 }
1371
1372                 if !std::thread::panicking() {
1373                         bg_processor.stop().unwrap();
1374                 }
1375         }
1376
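	// Tests that the background processor periodically persists the scorer, as observed via the
	// "Persisting scorer" line in the test logger.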
1377         #[test]
1378         fn test_scorer_persistence() {
1379                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1380                 let data_dir = nodes[0].persister.get_data_dir();
1381                 let persister = Arc::new(Persister::new(data_dir));
1382                 let event_handler = |_: _| {};
1383                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1384
1385                 loop {
1386                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1387                         let expected_log = "Persisting scorer".to_string();
1388                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1389                                 break
1390                         }
1391                 }
1392
1393                 if !std::thread::panicking() {
1394                         bg_processor.stop().unwrap();
1395                 }
1396         }
1397
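	// Shared body of the sync and async tests below; `$receive` waits for the persister's
	// graph-persistence notification and `$sleep` yields control between polls of the test logger.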
1398         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1399                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1400                         let features = ChannelFeatures::empty();
1401                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1402                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1403                         ).expect("Failed to update channel from partial announcement");
1404                         let original_graph_description = $nodes[0].network_graph.to_string();
1405                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1406                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1407
1408                         loop {
1409                                 $sleep;
1410                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1411                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1412                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1413                                         .unwrap_or(&0) > 1
1414                                 {
1415                                         // Wait until the loop has gone around at least twice.
1416                                         break
1417                                 }
1418                         }
1419
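			// Canned rapid gossip sync data (binary RGS format) announcing two channels; applied
			// below with a fixed timestamp so the resulting graph contents are deterministic.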
1420                         let initialization_input = vec![
1421                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1422                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1423                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1424                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1425                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1426                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1427                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1428                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1429                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1430                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1431                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1432                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1433                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1434                         ];
1435                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1436
1437                         // This should have added two channels and pruned the previous one.
1438                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1439
1440                         $receive.expect("Network graph not pruned within deadline");
1441
1442                         // All channels should now be pruned.
1443                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1444                 }
1445         }
1446
1447         #[test]
1448         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1449                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1450
1451                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1452                 let data_dir = nodes[0].persister.get_data_dir();
1453                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1454
1455                 let event_handler = |_: _| {};
1456                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1457
1458                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1459                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1460                         std::thread::sleep(Duration::from_millis(1)));
1461
1462                 background_processor.stop().unwrap();
1463         }
1464
1465         #[tokio::test]
1466         #[cfg(feature = "futures")]
1467         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1468                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1469
1470                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
1471                 let data_dir = nodes[0].persister.get_data_dir();
1472                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1473
1474                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1475                 let bp_future = super::process_events_async(
1476                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1477                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1478                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1479                                 let mut exit_receiver = exit_receiver.clone();
1480                                 Box::pin(async move {
1481                                         tokio::select! {
1482                                                 _ = tokio::time::sleep(dur) => false,
1483                                                 _ = exit_receiver.changed() => true,
1484                                         }
1485                                 })
1486                         }, false,
1487                 );
1488
1489                 let t1 = tokio::spawn(bp_future);
1490                 let t2 = tokio::spawn(async move {
1491                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1492                                 let mut i = 0;
1493                                 loop {
1494                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1495                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1496                                         assert!(i < 5);
1497                                         i += 1;
1498                                 }
1499                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1500                         exit_sender.send(()).unwrap();
1501                 });
1502                 let (r1, r2) = tokio::join!(t1, t2);
1503                 r1.unwrap().unwrap();
1504                 r2.unwrap()
1505         }
1506
1507         macro_rules! do_test_payment_path_scoring {
1508                 ($nodes: expr, $receive: expr) => {
1509                         // Ensure that we update the scorer when relevant events are processed. This covers payment
1510                         // path failures and successes as well as probe failures and successes, each of which must
1511                         // be reported to the scorer (note that the channel must be public or else we won't score
1512                         // it). `$receive` blocks until the test's event handler has been invoked with the
1513                         // corresponding event.
1514                         let scored_scid = 4242;
1515                         let secp_ctx = Secp256k1::new();
1516                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1517                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1518
1519                         let path = vec![RouteHop {
1520                                 pubkey: node_1_id,
1521                                 node_features: NodeFeatures::empty(),
1522                                 short_channel_id: scored_scid,
1523                                 channel_features: ChannelFeatures::empty(),
1524                                 fee_msat: 0,
1525                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1526                         }];
1527
1528                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1529                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1530                                 payment_id: None,
1531                                 payment_hash: PaymentHash([42; 32]),
1532                                 payment_failed_permanently: false,
1533                                 failure: PathFailure::OnPath { network_update: None },
1534                                 path: path.clone(),
1535                                 short_channel_id: Some(scored_scid),
1536                         });
1537                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1538                         match event {
1539                                 Event::PaymentPathFailed { .. } => {},
1540                                 _ => panic!("Unexpected event"),
1541                         }
1542
1543                         // Ensure that payments which were explicitly failed back by the destination are scored as
1544                         // ProbeSuccess, since reaching the destination proves the path itself worked.
1545                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1546                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1547                                 payment_id: None,
1548                                 payment_hash: PaymentHash([42; 32]),
1549                                 payment_failed_permanently: true,
1550                                 failure: PathFailure::OnPath { network_update: None },
1551                                 path: path.clone(),
1552                                 short_channel_id: None,
1553                         });
1554                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1555                         match event {
1556                                 Event::PaymentPathFailed { .. } => {},
1557                                 _ => panic!("Unexpected event"),
1558                         }
1559
1560                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1561                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1562                                 payment_id: PaymentId([42; 32]),
1563                                 payment_hash: None,
1564                                 path: path.clone(),
1565                         });
1566                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1567                         match event {
1568                                 Event::PaymentPathSuccessful { .. } => {},
1569                                 _ => panic!("Unexpected event"),
1570                         }
1571
1572                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1573                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1574                                 payment_id: PaymentId([42; 32]),
1575                                 payment_hash: PaymentHash([42; 32]),
1576                                 path: path.clone(),
1577                         });
1578                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1579                         match event {
1580                                 Event::ProbeSuccessful  { .. } => {},
1581                                 _ => panic!("Unexpected event"),
1582                         }
1583
1584                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1585                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1586                                 payment_id: PaymentId([42; 32]),
1587                                 payment_hash: PaymentHash([42; 32]),
1588                                 path,
1589                                 short_channel_id: Some(scored_scid),
1590                         });
1591                         let event = $receive.expect("ProbeFailure not handled within deadline");
1592                         match event {
1593                                 Event::ProbeFailed { .. } => {},
1594                                 _ => panic!("Unexpected event"),
1595                         }
1596                 }
1597         }
1598
1599         #[test]
1600         fn test_payment_path_scoring() {
1601                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1602                 let event_handler = move |event: Event| match event {
1603                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1604                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1605                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1606                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1607                         _ => panic!("Unexpected event: {:?}", event),
1608                 };
1609
1610                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1611                 let data_dir = nodes[0].persister.get_data_dir();
1612                 let persister = Arc::new(Persister::new(data_dir));
1613                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1614
1615                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1616
1617                 if !std::thread::panicking() {
1618                         bg_processor.stop().unwrap();
1619                 }
1620         }
1621
1622         #[tokio::test]
1623         #[cfg(feature = "futures")]
1624         async fn test_payment_path_scoring_async() {
1625                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1626                 let event_handler = move |event: Event| {
1627                         let sender_ref = sender.clone();
1628                         async move {
1629                                 match event {
1630                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1631                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1632                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1633                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1634                                         _ => panic!("Unexpected event: {:?}", event),
1635                                 }
1636                         }
1637                 };
1638
1639                 let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
1640                 let data_dir = nodes[0].persister.get_data_dir();
1641                 let persister = Arc::new(Persister::new(data_dir));
1642
1643                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1644
1645                 let bp_future = super::process_events_async(
1646                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1647                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1648                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1649                                 let mut exit_receiver = exit_receiver.clone();
1650                                 Box::pin(async move {
1651                                         tokio::select! {
1652                                                 _ = tokio::time::sleep(dur) => false,
1653                                                 _ = exit_receiver.changed() => true,
1654                                         }
1655                                 })
1656                         }, false,
1657                 );
1658                 let t1 = tokio::spawn(bp_future);
1659                 let t2 = tokio::spawn(async move {
1660                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1661                         exit_sender.send(()).unwrap();
1662                 });
1663
1664                 let (r1, r2) = tokio::join!(t1, t2);
1665                 r1.unwrap().unwrap();
1666                 r2.unwrap()
1667         }
1668 }