Add blinded path {metadata} fields to Path, but disallow paying blinded paths for now
rust-lightning / lightning-background-processor/src/lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
68 ///   at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon, as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
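///
/// # Example
///
/// A minimal lifecycle sketch (illustrative only; the arguments to [`BackgroundProcessor::start`]
/// are elided here and are assumed to be the LDK objects described in its documentation):
///
/// ```ignore
/// let bg_processor = BackgroundProcessor::start(/* persister, event handler, LDK objects... */);
/// // Keep the returned handle alive for the lifetime of the node; dropping it stops processing.
/// // On shutdown, `stop` (or `join`) surfaces any `ChannelManager` persistence error.
/// if let Err(e) = bg_processor.stop() {
///     eprintln!("Background processing hit a persistence error: {}", e);
/// }
/// ```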
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take a lot longer without compiler optimisations, so the ping timer is
99 /// increased in debug builds to allow for this; slower devices will still be disconnected if the
100 /// timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(feature = "futures")]
120 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
121 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
122 #[cfg(feature = "futures")]
123 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
124         min_u64(SCORER_PERSIST_TIMER, FIRST_NETWORK_PRUNE_TIMER));
125
126 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
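///
/// A minimal sketch of selecting a variant via the constructors below (illustrative only;
/// `p2p_sync` and `rapid_sync` are assumed to be previously constructed [`P2PGossipSync`] and
/// [`RapidGossipSync`] instances):
///
/// ```ignore
/// let gossip = GossipSync::p2p(p2p_sync);     // gossip via the lightning P2P network
/// let gossip = GossipSync::rapid(rapid_sync); // gossip from a rapid-gossip-sync server
/// let gossip = GossipSync::none();            // no gossip handling at all
/// ```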
127 pub enum GossipSync<
128         P: Deref<Target = P2PGossipSync<G, U, L>>,
129         R: Deref<Target = RapidGossipSync<G, L>>,
130         G: Deref<Target = NetworkGraph<L>>,
131         U: Deref,
132         L: Deref,
133 >
134 where U::Target: UtxoLookup, L::Target: Logger {
135         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
136         P2P(P),
137         /// Rapid gossip sync from a trusted server.
138         Rapid(R),
139         /// No gossip sync.
140         None,
141 }
142
143 impl<
144         P: Deref<Target = P2PGossipSync<G, U, L>>,
145         R: Deref<Target = RapidGossipSync<G, L>>,
146         G: Deref<Target = NetworkGraph<L>>,
147         U: Deref,
148         L: Deref,
149 > GossipSync<P, R, G, U, L>
150 where U::Target: UtxoLookup, L::Target: Logger {
151         fn network_graph(&self) -> Option<&G> {
152                 match self {
153                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
154                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
155                         GossipSync::None => None,
156                 }
157         }
158
159         fn prunable_network_graph(&self) -> Option<&G> {
160                 match self {
161                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
162                         GossipSync::Rapid(gossip_sync) => {
163                                 if gossip_sync.is_initial_sync_complete() {
164                                         Some(gossip_sync.network_graph())
165                                 } else {
166                                         None
167                                 }
168                         },
169                         GossipSync::None => None,
170                 }
171         }
172 }
173
174 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
175 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
176         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
177 where
178         U::Target: UtxoLookup,
179         L::Target: Logger,
180 {
181         /// Initializes a new [`GossipSync::P2P`] variant.
182         pub fn p2p(gossip_sync: P) -> Self {
183                 GossipSync::P2P(gossip_sync)
184         }
185 }
186
187 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
188 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
189         GossipSync<
190                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
191                 R,
192                 G,
193                 &'a (dyn UtxoLookup + Send + Sync),
194                 L,
195         >
196 where
197         L::Target: Logger,
198 {
199         /// Initializes a new [`GossipSync::Rapid`] variant.
200         pub fn rapid(gossip_sync: R) -> Self {
201                 GossipSync::Rapid(gossip_sync)
202         }
203 }
204
205 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
206 impl<'a, L: Deref>
207         GossipSync<
208                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
209                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
210                 &'a NetworkGraph<L>,
211                 &'a (dyn UtxoLookup + Send + Sync),
212                 L,
213         >
214 where
215         L::Target: Logger,
216 {
217         /// Initializes a new [`GossipSync::None`] variant.
218         pub fn none() -> Self {
219                 GossipSync::None
220         }
221 }
222
223 fn handle_network_graph_update<L: Deref>(
224         network_graph: &NetworkGraph<L>, event: &Event
225 ) where L::Target: Logger {
226         if let Event::PaymentPathFailed {
227                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
228         {
229                 network_graph.handle_network_update(upd);
230         }
231 }
232
233 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
234         scorer: &'a S, event: &Event
235 ) {
236         let mut score = scorer.lock();
237         match event {
238                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
239                         score.payment_path_failed(path, *scid);
240                 },
241                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
242                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
243                         // because the payment made it all the way to the destination with sufficient liquidity.
244                         score.probe_successful(path);
245                 },
246                 Event::PaymentPathSuccessful { path, .. } => {
247                         score.payment_path_successful(path);
248                 },
249                 Event::ProbeSuccessful { path, .. } => {
250                         score.probe_successful(path);
251                 },
252                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
253                         score.probe_failed(path, *scid);
254                 },
255                 _ => {},
256         }
257 }
258
259 macro_rules! define_run_body {
260         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
261          $channel_manager: ident, $process_channel_manager_events: expr,
262          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
263          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
264          $check_slow_await: expr)
265         => { {
266                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
267                 $channel_manager.timer_tick_occurred();
268
269                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
270                 let mut last_ping_call = $get_timer(PING_TIMER);
271                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
272                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
273                 let mut have_pruned = false;
274
275                 loop {
276                         $process_channel_manager_events;
277                         $process_chain_monitor_events;
278
279                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
280                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
281                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
282                         // without running the normal event processing above and handing events to users.
283                         //
284                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
285                         // processing a message effectively at any point during this loop. In order to
286                         // minimize the time between such processing completing and persisting the updated
287                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
288                         // generally, and as a fallback place such blocking only immediately before
289                         // persistence.
290                         $peer_manager.process_events();
291
292                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
293                         // see `await_start`'s use below.
294                         let mut await_start = None;
295                         if $check_slow_await { await_start = Some($get_timer(1)); }
296                         let updates_available = $await;
297                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
298
299                         if updates_available {
300                                 log_trace!($logger, "Persisting ChannelManager...");
301                                 $persister.persist_manager(&*$channel_manager)?;
302                                 log_trace!($logger, "Done persisting ChannelManager.");
303                         }
304                         // Exit the loop if the background processor was requested to stop.
305                         if $loop_exit_check {
306                                 log_trace!($logger, "Terminating background processor.");
307                                 break;
308                         }
309                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
310                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
311                                 $channel_manager.timer_tick_occurred();
312                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
313                         }
314                         if await_slow {
315                                 // On various platforms, we may be starved of CPU cycles for several reasons.
316                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
317                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
318                                 // may not get any cycles.
319                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
320                                 // full second, at which point we assume sockets may have been killed (they
321                                 // appear to be at least on some platforms, even if it has only been a second).
322                                 // Note that we have to take care to not get here just because user event
323                                 // processing was slow at the top of the loop. For example, the sample client
324                                 // may call Bitcoin Core RPCs during event handling, which very often takes
325                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
326                                 // peers.
327                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
328                                 $peer_manager.disconnect_all_peers();
329                                 last_ping_call = $get_timer(PING_TIMER);
330                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
331                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
332                                 $peer_manager.timer_tick_occurred();
333                                 last_ping_call = $get_timer(PING_TIMER);
334                         }
335
336                         // Note that we want to run a graph prune once not long after startup before
337                         // falling back to our usual hourly prunes. This avoids short-lived clients never
338                         // pruning their network graph. We run once 60 seconds after startup before
339                         // continuing our normal cadence.
340                         if $timer_elapsed(&mut last_prune_call, if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }) {
341                                 // The network graph must not be pruned while rapid sync completion is pending
342                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
343                                         #[cfg(feature = "std")] {
344                                                 log_trace!($logger, "Pruning and persisting network graph.");
345                                                 network_graph.remove_stale_channels_and_tracking();
346                                         }
347                                         #[cfg(not(feature = "std"))] {
348                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
349                                                 log_trace!($logger, "Persisting network graph.");
350                                         }
351
352                                         if let Err(e) = $persister.persist_graph(network_graph) {
353                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
354                                         }
355
356                                         have_pruned = true;
357                                 }
358                                 last_prune_call = $get_timer(NETWORK_PRUNE_TIMER);
359                         }
360
361                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
362                                 if let Some(ref scorer) = $scorer {
363                                         log_trace!($logger, "Persisting scorer");
364                                         if let Err(e) = $persister.persist_scorer(&scorer) {
365                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
366                                         }
367                                 }
368                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
369                         }
370                 }
371
372                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
373                 // some races where users quit while channel updates were in-flight, with
374                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
375                 $persister.persist_manager(&*$channel_manager)?;
376
377                 // Persist Scorer on exit
378                 if let Some(ref scorer) = $scorer {
379                         $persister.persist_scorer(&scorer)?;
380                 }
381
382                 // Persist NetworkGraph on exit
383                 if let Some(network_graph) = $gossip_sync.network_graph() {
384                         $persister.persist_graph(network_graph)?;
385                 }
386
387                 Ok(())
388         } }
389 }
390
391 #[cfg(feature = "futures")]
392 pub(crate) mod futures_util {
393         use core::future::Future;
394         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
395         use core::pin::Pin;
396         use core::marker::Unpin;
397         pub(crate) struct Selector<
398                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
399         > {
400                 pub a: A,
401                 pub b: B,
402                 pub c: C,
403         }
404         pub(crate) enum SelectorOutput {
405                 A, B, C(bool),
406         }
407
408         impl<
409                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
410         > Future for Selector<A, B, C> {
411                 type Output = SelectorOutput;
412                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
413                         match Pin::new(&mut self.a).poll(ctx) {
414                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
415                                 Poll::Pending => {},
416                         }
417                         match Pin::new(&mut self.b).poll(ctx) {
418                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
419                                 Poll::Pending => {},
420                         }
421                         match Pin::new(&mut self.c).poll(ctx) {
422                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
423                                 Poll::Pending => {},
424                         }
425                         Poll::Pending
426                 }
427         }
428
429         // If we want to poll a future without an async context to figure out if it has completed or
430         // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
431         // but sadly there's a good bit of boilerplate here.
432         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
433         fn dummy_waker_action(_: *const ()) { }
434
435         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
436                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
437         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
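	// Illustrative use (this mirrors the `timer_elapsed` closure in `process_events_async` below):
	// polling once with the dummy waker reports whether the future has already completed, and the
	// future will never actually be re-woken. Here `some_unpin_future` stands in for any `Unpin`
	// future:
	//
	//     let waker = dummy_waker();
	//     let mut ctx = core::task::Context::from_waker(&waker);
	//     let already_done = core::pin::Pin::new(&mut some_unpin_future).poll(&mut ctx).is_ready();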
438 }
439 #[cfg(feature = "futures")]
440 use futures_util::{Selector, SelectorOutput, dummy_waker};
441 #[cfg(feature = "futures")]
442 use core::task;
443
444 /// Processes background events in a future.
445 ///
446 /// `sleeper` should return a future which completes in the given amount of time and returns a
447 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
448 /// future which outputs true, the loop will exit and this function's future will complete.
449 ///
450 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
451 ///
452 /// Requires the `futures` feature. Note that while this method is available without the `std`
453 /// feature, using it that way will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
454 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
455 /// yourself at regular intervals.
456 ///
457 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
458 /// mobile device, where we may need to check for interruption of the application regularly. If you
459 /// are unsure, you should set the flag, as its performance impact is minimal unless there
460 /// are hundreds or thousands of simultaneous process calls running.
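///
/// # Example
///
/// A minimal sketch of driving this function from a Tokio task (illustrative only; the LDK
/// objects passed in, the `handle_event` function, and the `stop_receiver`
/// `tokio::sync::watch::Receiver` used for shutdown are all assumed to exist elsewhere):
///
/// ```ignore
/// let handle = tokio::spawn(process_events_async(
///     persister, |event| async move { handle_event(event).await }, chain_monitor, channel_manager,
///     gossip_sync, peer_manager, logger, Some(scorer),
///     move |sleep_duration| {
///         let mut stop = stop_receiver.clone();
///         Box::pin(async move {
///             tokio::select! {
///                 _ = tokio::time::sleep(sleep_duration) => false,
///                 _ = stop.changed() => true,
///             }
///         })
///     },
///     false, // not a mobile/interruptible platform
/// ));
/// ```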
461 #[cfg(feature = "futures")]
462 pub async fn process_events_async<
463         'a,
464         UL: 'static + Deref + Send + Sync,
465         CF: 'static + Deref + Send + Sync,
466         CW: 'static + Deref + Send + Sync,
467         T: 'static + Deref + Send + Sync,
468         ES: 'static + Deref + Send + Sync,
469         NS: 'static + Deref + Send + Sync,
470         SP: 'static + Deref + Send + Sync,
471         F: 'static + Deref + Send + Sync,
472         R: 'static + Deref + Send + Sync,
473         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
474         L: 'static + Deref + Send + Sync,
475         P: 'static + Deref + Send + Sync,
476         Descriptor: 'static + SocketDescriptor + Send + Sync,
477         CMH: 'static + Deref + Send + Sync,
478         RMH: 'static + Deref + Send + Sync,
479         OMH: 'static + Deref + Send + Sync,
480         EventHandlerFuture: core::future::Future<Output = ()>,
481         EventHandler: Fn(Event) -> EventHandlerFuture,
482         PS: 'static + Deref + Send,
483         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
484         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
485         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
486         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
487         UMH: 'static + Deref + Send + Sync,
488         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
489         S: 'static + Deref<Target = SC> + Send + Sync,
490         SC: for<'b> WriteableScore<'b>,
491         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
492         Sleeper: Fn(Duration) -> SleepFuture
493 >(
494         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
495         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
496         sleeper: Sleeper, mobile_interruptable_platform: bool,
497 ) -> Result<(), lightning::io::Error>
498 where
499         UL::Target: 'static + UtxoLookup,
500         CF::Target: 'static + chain::Filter,
501         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
502         T::Target: 'static + BroadcasterInterface,
503         ES::Target: 'static + EntropySource,
504         NS::Target: 'static + NodeSigner,
505         SP::Target: 'static + SignerProvider,
506         F::Target: 'static + FeeEstimator,
507         R::Target: 'static + Router,
508         L::Target: 'static + Logger,
509         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
510         CMH::Target: 'static + ChannelMessageHandler,
511         OMH::Target: 'static + OnionMessageHandler,
512         RMH::Target: 'static + RoutingMessageHandler,
513         UMH::Target: 'static + CustomMessageHandler,
514         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
515 {
516         let mut should_break = false;
517         let async_event_handler = |event| {
518                 let network_graph = gossip_sync.network_graph();
519                 let event_handler = &event_handler;
520                 let scorer = &scorer;
521                 async move {
522                         if let Some(network_graph) = network_graph {
523                                 handle_network_graph_update(network_graph, &event)
524                         }
525                         if let Some(ref scorer) = scorer {
526                                 update_scorer(scorer, &event);
527                         }
528                         event_handler(event).await;
529                 }
530         };
531         define_run_body!(persister,
532                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
533                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
534                 gossip_sync, peer_manager, logger, scorer, should_break, {
535                         let fut = Selector {
536                                 a: channel_manager.get_persistable_update_future(),
537                                 b: chain_monitor.get_update_future(),
538                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
539                         };
540                         match fut.await {
541                                 SelectorOutput::A => true,
542                                 SelectorOutput::B => false,
543                                 SelectorOutput::C(exit) => {
544                                         should_break = exit;
545                                         false
546                                 }
547                         }
548                 }, |t| sleeper(Duration::from_secs(t)),
549                 |fut: &mut SleepFuture, _| {
550                         let mut waker = dummy_waker();
551                         let mut ctx = task::Context::from_waker(&mut waker);
552                         match core::pin::Pin::new(fut).poll(&mut ctx) {
553                                 task::Poll::Ready(exit) => { should_break = exit; true },
554                                 task::Poll::Pending => false,
555                         }
556                 }, mobile_interruptable_platform)
557 }
558
559 #[cfg(feature = "std")]
560 impl BackgroundProcessor {
561         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
562         /// documentation].
563         ///
564         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
565 /// [`Persister::persist_manager`] returns an error. In case of an error, the error can be retrieved
566 /// by calling either [`join`] or [`stop`].
567         ///
568         /// # Data Persistence
569         ///
570         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
571         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
572         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
573         /// provided implementation.
574         ///
575         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
576         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
577         /// See the `lightning-persister` crate for LDK's provided implementation.
578         ///
579         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
580         /// error or call [`join`] and handle any error that may arise. For the latter case,
581         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
582         ///
583         /// # Event Handling
584         ///
585         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
586         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
587         /// functionality implemented by other handlers.
588 /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
589         ///
590         /// # Rapid Gossip Sync
591         ///
592         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
593         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
594         /// until the [`RapidGossipSync`] instance completes its first sync.
595         ///
596         /// [top-level documentation]: BackgroundProcessor
597         /// [`join`]: Self::join
598         /// [`stop`]: Self::stop
599         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
600         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
601         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
602         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
603         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
604         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
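	///
	/// # Example
	///
	/// A minimal sketch (illustrative only; `persister`, `chain_monitor`, `channel_manager`,
	/// `gossip_sync`, `peer_manager`, `logger` and `scorer` are assumed to be the corresponding
	/// LDK objects constructed elsewhere):
	///
	/// ```ignore
	/// let event_handler = |event: Event| match event {
	///     Event::PaymentClaimable { .. } => { /* claim the payment, etc. */ },
	///     _ => {},
	/// };
	/// let bg_processor = BackgroundProcessor::start(
	///     persister, event_handler, chain_monitor, channel_manager,
	///     GossipSync::p2p(gossip_sync), peer_manager, logger, Some(scorer),
	/// );
	/// // ... later, on shutdown:
	/// bg_processor.stop().expect("ChannelManager persistence failed");
	/// ```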
605         pub fn start<
606                 'a,
607                 UL: 'static + Deref + Send + Sync,
608                 CF: 'static + Deref + Send + Sync,
609                 CW: 'static + Deref + Send + Sync,
610                 T: 'static + Deref + Send + Sync,
611                 ES: 'static + Deref + Send + Sync,
612                 NS: 'static + Deref + Send + Sync,
613                 SP: 'static + Deref + Send + Sync,
614                 F: 'static + Deref + Send + Sync,
615                 R: 'static + Deref + Send + Sync,
616                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
617                 L: 'static + Deref + Send + Sync,
618                 P: 'static + Deref + Send + Sync,
619                 Descriptor: 'static + SocketDescriptor + Send + Sync,
620                 CMH: 'static + Deref + Send + Sync,
621                 OMH: 'static + Deref + Send + Sync,
622                 RMH: 'static + Deref + Send + Sync,
623                 EH: 'static + EventHandler + Send,
624                 PS: 'static + Deref + Send,
625                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
626                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
627                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
628                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
629                 UMH: 'static + Deref + Send + Sync,
630                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
631                 S: 'static + Deref<Target = SC> + Send + Sync,
632                 SC: for <'b> WriteableScore<'b>,
633         >(
634                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
635                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
636         ) -> Self
637         where
638                 UL::Target: 'static + UtxoLookup,
639                 CF::Target: 'static + chain::Filter,
640                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
641                 T::Target: 'static + BroadcasterInterface,
642                 ES::Target: 'static + EntropySource,
643                 NS::Target: 'static + NodeSigner,
644                 SP::Target: 'static + SignerProvider,
645                 F::Target: 'static + FeeEstimator,
646                 R::Target: 'static + Router,
647                 L::Target: 'static + Logger,
648                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
649                 CMH::Target: 'static + ChannelMessageHandler,
650                 OMH::Target: 'static + OnionMessageHandler,
651                 RMH::Target: 'static + RoutingMessageHandler,
652                 UMH::Target: 'static + CustomMessageHandler,
653                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
654         {
655                 let stop_thread = Arc::new(AtomicBool::new(false));
656                 let stop_thread_clone = stop_thread.clone();
657                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
658                         let event_handler = |event| {
659                                 let network_graph = gossip_sync.network_graph();
660                                 if let Some(network_graph) = network_graph {
661                                         handle_network_graph_update(network_graph, &event)
662                                 }
663                                 if let Some(ref scorer) = scorer {
664                                         update_scorer(scorer, &event);
665                                 }
666                                 event_handler.handle_event(event);
667                         };
668                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
669                                 channel_manager, channel_manager.process_pending_events(&event_handler),
670                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
671                                 Sleeper::from_two_futures(
672                                         channel_manager.get_persistable_update_future(),
673                                         chain_monitor.get_update_future()
674                                 ).wait_timeout(Duration::from_millis(100)),
675                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
676                 });
677                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
678         }
679
680         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
681         /// [`ChannelManager`].
682         ///
683         /// # Panics
684         ///
685         /// This function panics if the background thread has panicked such as while persisting or
686         /// handling events.
687         ///
688         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
689         pub fn join(mut self) -> Result<(), std::io::Error> {
690                 assert!(self.thread_handle.is_some());
691                 self.join_thread()
692         }
693
694         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
695         /// [`ChannelManager`].
696         ///
697         /// # Panics
698         ///
699         /// This function panics if the background thread has panicked such as while persisting or
700         /// handling events.
701         ///
702         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
703         pub fn stop(mut self) -> Result<(), std::io::Error> {
704                 assert!(self.thread_handle.is_some());
705                 self.stop_and_join_thread()
706         }
707
708         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
709                 self.stop_thread.store(true, Ordering::Release);
710                 self.join_thread()
711         }
712
713         fn join_thread(&mut self) -> Result<(), std::io::Error> {
714                 match self.thread_handle.take() {
715                         Some(handle) => handle.join().unwrap(),
716                         None => Ok(()),
717                 }
718         }
719 }
720
721 #[cfg(feature = "std")]
722 impl Drop for BackgroundProcessor {
723         fn drop(&mut self) {
724                 self.stop_and_join_thread().unwrap();
725         }
726 }
727
728 #[cfg(all(feature = "std", test))]
729 mod tests {
730         use bitcoin::blockdata::block::BlockHeader;
731         use bitcoin::blockdata::constants::genesis_block;
732         use bitcoin::blockdata::locktime::PackedLockTime;
733         use bitcoin::blockdata::transaction::{Transaction, TxOut};
734         use bitcoin::network::constants::Network;
735         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
736         use lightning::chain::{BestBlock, Confirm, chainmonitor};
737         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
738         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
739         use lightning::chain::transaction::OutPoint;
740         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
741         use lightning::{get_event_msg, get_event};
742         use lightning::ln::PaymentHash;
743         use lightning::ln::channelmanager;
744         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
745         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
746         use lightning::ln::msgs::{ChannelMessageHandler, Init};
747         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
748         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
749         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
750         use lightning::routing::scoring::{ChannelUsage, Score};
751         use lightning::util::config::UserConfig;
752         use lightning::util::ser::Writeable;
753         use lightning::util::test_utils;
754         use lightning::util::persist::KVStorePersister;
755         use lightning_persister::FilesystemPersister;
756         use std::collections::VecDeque;
757         use std::fs;
758         use std::path::PathBuf;
759         use std::sync::{Arc, Mutex};
760         use std::sync::mpsc::SyncSender;
761         use std::time::Duration;
762         use bitcoin::hashes::Hash;
763         use bitcoin::TxMerkleNode;
764         use lightning_rapid_gossip_sync::RapidGossipSync;
765         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
766
767         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
768
769         #[derive(Clone, Hash, PartialEq, Eq)]
770         struct TestDescriptor{}
771         impl SocketDescriptor for TestDescriptor {
772                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
773                         0
774                 }
775
776                 fn disconnect_socket(&mut self) {}
777         }
778
779         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
780
781         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
782
783         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
784         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
785
786         struct Node {
787                 node: Arc<ChannelManager>,
788                 p2p_gossip_sync: PGS,
789                 rapid_gossip_sync: RGS,
790                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
791                 chain_monitor: Arc<ChainMonitor>,
792                 persister: Arc<FilesystemPersister>,
793                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
794                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
795                 logger: Arc<test_utils::TestLogger>,
796                 best_block: BestBlock,
797                 scorer: Arc<Mutex<TestScorer>>,
798         }
799
800         impl Node {
801                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
802                         GossipSync::P2P(self.p2p_gossip_sync.clone())
803                 }
804
805                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
806                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
807                 }
808
809                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
810                         GossipSync::None
811                 }
812         }
813
814         impl Drop for Node {
815                 fn drop(&mut self) {
816                         let data_dir = self.persister.get_data_dir();
817                         match fs::remove_dir_all(data_dir.clone()) {
818                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
819                                 _ => {}
820                         }
821                 }
822         }
823
824         struct Persister {
825                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
826                 graph_persistence_notifier: Option<SyncSender<()>>,
827                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
828                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
829                 filesystem_persister: FilesystemPersister,
830         }
831
832         impl Persister {
833                 fn new(data_dir: String) -> Self {
834                         let filesystem_persister = FilesystemPersister::new(data_dir);
835                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
836                 }
837
838                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
839                         Self { graph_error: Some((error, message)), ..self }
840                 }
841
842                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
843                         Self { graph_persistence_notifier: Some(sender), ..self }
844                 }
845
846                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
847                         Self { manager_error: Some((error, message)), ..self }
848                 }
849
850                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
851                         Self { scorer_error: Some((error, message)), ..self }
852                 }
853         }
854
855         impl KVStorePersister for Persister {
856                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
857                         if key == "manager" {
858                                 if let Some((error, message)) = self.manager_error {
859                                         return Err(std::io::Error::new(error, message))
860                                 }
861                         }
862
863                         if key == "network_graph" {
864                                 if let Some(sender) = &self.graph_persistence_notifier {
865                                         sender.send(()).unwrap();
866                                 };
867
868                                 if let Some((error, message)) = self.graph_error {
869                                         return Err(std::io::Error::new(error, message))
870                                 }
871                         }
872
873                         if key == "scorer" {
874                                 if let Some((error, message)) = self.scorer_error {
875                                         return Err(std::io::Error::new(error, message))
876                                 }
877                         }
878
879                         self.filesystem_persister.persist(key, object)
880                 }
881         }
882
883         struct TestScorer {
884                 event_expectations: Option<VecDeque<TestResult>>,
885         }
886
887         #[derive(Debug)]
888         enum TestResult {
889                 PaymentFailure { path: Path, short_channel_id: u64 },
890                 PaymentSuccess { path: Path },
891                 ProbeFailure { path: Path },
892                 ProbeSuccess { path: Path },
893         }
894
895         impl TestScorer {
896                 fn new() -> Self {
897                         Self { event_expectations: None }
898                 }
899
900                 fn expect(&mut self, expectation: TestResult) {
901                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
902                 }
903         }
904
905         impl lightning::util::ser::Writeable for TestScorer {
906                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
907         }
908
909         impl Score for TestScorer {
910                 fn channel_penalty_msat(
911                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
912                 ) -> u64 { unimplemented!(); }
913
914                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
915                         if let Some(expectations) = &mut self.event_expectations {
916                                 match expectations.pop_front().unwrap() {
917                                         TestResult::PaymentFailure { path, short_channel_id } => {
918                                                 assert_eq!(actual_path, &path);
919                                                 assert_eq!(actual_short_channel_id, short_channel_id);
920                                         },
921                                         TestResult::PaymentSuccess { path } => {
922                                                 panic!("Unexpected successful payment path: {:?}", path)
923                                         },
924                                         TestResult::ProbeFailure { path } => {
925                                                 panic!("Unexpected probe failure: {:?}", path)
926                                         },
927                                         TestResult::ProbeSuccess { path } => {
928                                                 panic!("Unexpected probe success: {:?}", path)
929                                         }
930                                 }
931                         }
932                 }
933
934                 fn payment_path_successful(&mut self, actual_path: &Path) {
935                         if let Some(expectations) = &mut self.event_expectations {
936                                 match expectations.pop_front().unwrap() {
937                                         TestResult::PaymentFailure { path, .. } => {
938                                                 panic!("Unexpected payment path failure: {:?}", path)
939                                         },
940                                         TestResult::PaymentSuccess { path } => {
941                                                 assert_eq!(actual_path, &path);
942                                         },
943                                         TestResult::ProbeFailure { path } => {
944                                                 panic!("Unexpected probe failure: {:?}", path)
945                                         },
946                                         TestResult::ProbeSuccess { path } => {
947                                                 panic!("Unexpected probe success: {:?}", path)
948                                         }
949                                 }
950                         }
951                 }
952
953                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
954                         if let Some(expectations) = &mut self.event_expectations {
955                                 match expectations.pop_front().unwrap() {
956                                         TestResult::PaymentFailure { path, .. } => {
957                                                 panic!("Unexpected payment path failure: {:?}", path)
958                                         },
959                                         TestResult::PaymentSuccess { path } => {
960                                                 panic!("Unexpected payment path success: {:?}", path)
961                                         },
962                                         TestResult::ProbeFailure { path } => {
963                                                 assert_eq!(actual_path, &path);
964                                         },
965                                         TestResult::ProbeSuccess { path } => {
966                                                 panic!("Unexpected probe success: {:?}", path)
967                                         }
968                                 }
969                         }
970                 }
971                 fn probe_successful(&mut self, actual_path: &Path) {
972                         if let Some(expectations) = &mut self.event_expectations {
973                                 match expectations.pop_front().unwrap() {
974                                         TestResult::PaymentFailure { path, .. } => {
975                                                 panic!("Unexpected payment path failure: {:?}", path)
976                                         },
977                                         TestResult::PaymentSuccess { path } => {
978                                                 panic!("Unexpected payment path success: {:?}", path)
979                                         },
980                                         TestResult::ProbeFailure { path } => {
981                                                 panic!("Unexpected probe failure: {:?}", path)
982                                         },
983                                         TestResult::ProbeSuccess { path } => {
984                                                 assert_eq!(actual_path, &path);
985                                         }
986                                 }
987                         }
988                 }
989         }
990
991         impl Drop for TestScorer {
992                 fn drop(&mut self) {
993                         if std::thread::panicking() {
994                                 return;
995                         }
996
997                         if let Some(event_expectations) = &self.event_expectations {
998                                 if !event_expectations.is_empty() {
999                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1000                                 }
1001                         }
1002                 }
1003         }
1004
1005         fn get_full_filepath(filepath: String, filename: String) -> String {
1006                 let mut path = PathBuf::from(filepath);
1007                 path.push(filename);
1008                 path.to_str().unwrap().to_string()
1009         }
1010
1011         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1012                 let mut nodes = Vec::new();
1013                 for i in 0..num_nodes {
1014                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
1015                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1016                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1017                         let network = Network::Testnet;
1018                         let genesis_block = genesis_block(network);
1019                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1020                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1021                         let seed = [i as u8; 32];
1022                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1023                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1024                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1025                         let now = Duration::from_secs(genesis_block.header.time as u64);
1026                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1027                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1028                         let best_block = BestBlock::from_network(network);
1029                         let params = ChainParameters { network, best_block };
1030                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1031                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1032                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1033                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1034                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1035                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1036                         nodes.push(node);
1037                 }
1038
1039                 for i in 0..num_nodes {
1040                         for j in (i+1)..num_nodes {
1041                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1042                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1043                         }
1044                 }
1045
1046                 nodes
1047         }
1048
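	// The two macros below drive the channel-open handshake between a pair of test nodes:
	// `begin_open_channel!` exchanges `open_channel`/`accept_channel`, leaving `$node_a` with a
	// pending `FundingGenerationReady` event, while `open_channel!` additionally generates the
	// funding transaction, exchanges `funding_created`/`funding_signed`, waits for `ChannelPending`
	// on both nodes, and returns the funding transaction.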
1049         macro_rules! open_channel {
1050                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1051                         begin_open_channel!($node_a, $node_b, $channel_value);
1052                         let events = $node_a.node.get_and_clear_pending_events();
1053                         assert_eq!(events.len(), 1);
1054                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1055                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1056                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1057                         get_event!($node_b, Event::ChannelPending);
1058                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1059                         get_event!($node_a, Event::ChannelPending);
1060                         tx
1061                 }}
1062         }
1063
1064         macro_rules! begin_open_channel {
1065                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1066                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1067                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1068                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1069                 }}
1070         }
1071
1072         macro_rules! handle_funding_generation_ready {
1073                 ($event: expr, $channel_value: expr) => {{
1074                         match $event {
1075                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1076                                         assert_eq!(channel_value_satoshis, $channel_value);
1077                                         assert_eq!(user_channel_id, 42);
1078
1079                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1080                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1081                                         }]};
1082                                         (temporary_channel_id, tx)
1083                                 },
1084                                 _ => panic!("Unexpected event"),
1085                         }
1086                 }}
1087         }
1088
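	// Connects `depth` new blocks on top of `node`'s current best block, confirming `tx` in the
	// first of them. Only the first block (via `transactions_confirmed`) and, for depths greater
	// than one, the last block (via `best_block_updated`) are reported to the node and chain
	// monitor; the intermediate blocks only advance the locally tracked best block.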
1089         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1090                 for i in 1..=depth {
1091                         let prev_blockhash = node.best_block.block_hash();
1092                         let height = node.best_block.height() + 1;
1093                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1094                         let txdata = vec![(0, tx)];
1095                         node.best_block = BestBlock::new(header.block_hash(), height);
1096                         match i {
1097                                 1 => {
1098                                         node.node.transactions_confirmed(&header, &txdata, height);
1099                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1100                                 },
1101                                 x if x == depth => {
1102                                         node.node.best_block_updated(&header, height);
1103                                         node.chain_monitor.best_block_updated(&header, height);
1104                                 },
1105                                 _ => {},
1106                         }
1107                 }
1108         }
1109         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1110                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1111         }
1112
1113         #[test]
1114         fn test_background_processor() {
1115		// Test that when a new channel is created, the ChannelManager signals that it needs to be
1116		// re-persisted and is successfully re-persisted. Also test that further updates (here, a
1117		// force-close) are re-persisted, and that the network graph and scorer are persisted too.
1118                 let nodes = create_nodes(2, "test_background_processor".to_string());
1119
1120                 // Go through the channel creation process so that each node has something to persist. Since
1121                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1122                 // avoid a race with processing events.
1123                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1124
1125		// Start the background processor to watch nodes[0].
1126                 let data_dir = nodes[0].persister.get_data_dir();
1127                 let persister = Arc::new(Persister::new(data_dir));
1128                 let event_handler = |_: _| {};
1129                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1130
1131                 macro_rules! check_persisted_data {
1132                         ($node: expr, $filepath: expr) => {
1133                                 let mut expected_bytes = Vec::new();
1134                                 loop {
1135                                         expected_bytes.clear();
1136                                         match $node.write(&mut expected_bytes) {
1137                                                 Ok(()) => {
1138                                                         match std::fs::read($filepath) {
1139                                                                 Ok(bytes) => {
1140                                                                         if bytes == expected_bytes {
1141                                                                                 break
1142                                                                         } else {
1143                                                                                 continue
1144                                                                         }
1145                                                                 },
1146                                                                 Err(_) => continue
1147                                                         }
1148                                                 },
1149                                                 Err(e) => panic!("Unexpected error: {}", e)
1150                                         }
1151                                 }
1152                         }
1153                 }
1154
1155                 // Check that the initial channel manager data is persisted as expected.
1156                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1157                 check_persisted_data!(nodes[0].node, filepath.clone());
1158
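		// Wait until the ChannelManager no longer signals that it needs re-persistence before
		// force-closing, so the next persistence we observe is attributable to the force-close.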
1159                 loop {
1160                         if !nodes[0].node.get_persistence_condvar_value() { break }
1161                 }
1162
1163                 // Force-close the channel.
1164                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1165
1166                 // Check that the force-close updates are persisted.
1167                 check_persisted_data!(nodes[0].node, filepath.clone());
1168                 loop {
1169                         if !nodes[0].node.get_persistence_condvar_value() { break }
1170                 }
1171
1172                 // Check network graph is persisted
1173                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1174                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1175
1176                 // Check scorer is persisted
1177                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1178                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1179
1180                 if !std::thread::panicking() {
1181                         bg_processor.stop().unwrap();
1182                 }
1183         }
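
	// `check_persisted_data!` above busy-loops until the bytes on disk match the in-memory
	// serialization. A minimal sketch of the same check with an explicit deadline is shown below;
	// `wait_for_persisted` is a hypothetical helper that is not used by these tests and assumes
	// only `lightning::util::ser::Writeable` and the standard library.
	#[allow(dead_code)]
	fn wait_for_persisted<W: lightning::util::ser::Writeable>(object: &W, filepath: &str, deadline: Duration) -> bool {
		let start = std::time::Instant::now();
		while start.elapsed() < deadline {
			let mut expected_bytes = Vec::new();
			object.write(&mut expected_bytes).expect("in-memory serialization should not fail");
			// The background processor may not have written (or finished writing) the file yet, so
			// treat read errors and mismatches as "not yet" and retry.
			if let Ok(bytes) = std::fs::read(filepath) {
				if bytes == expected_bytes {
					return true;
				}
			}
		}
		false
	}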
1184
1185         #[test]
1186         fn test_timer_tick_called() {
1187		// Test that `ChannelManager::timer_tick_occurred` and `PeerManager::timer_tick_occurred` are
1188		// called on their respective timers (`FRESHNESS_TIMER` and `PING_TIMER`).
1189                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1190                 let data_dir = nodes[0].persister.get_data_dir();
1191                 let persister = Arc::new(Persister::new(data_dir));
1192                 let event_handler = |_: _| {};
1193                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1194                 loop {
1195                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1196                         let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
1197                         let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
1198                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
1199                                         log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
1200                                 break
1201                         }
1202                 }
1203
1204                 if !std::thread::panicking() {
1205                         bg_processor.stop().unwrap();
1206                 }
1207         }
1208
1209         #[test]
1210         fn test_channel_manager_persist_error() {
1211		// Test that if we encounter an error during manager persistence, the error is returned when
1211		// the background processor is joined.
1212                 let nodes = create_nodes(2, "test_persist_error".to_string());
1213                 open_channel!(nodes[0], nodes[1], 100000);
1214
1215                 let data_dir = nodes[0].persister.get_data_dir();
1216                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1217                 let event_handler = |_: _| {};
1218                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1219                 match bg_processor.join() {
1220                         Ok(_) => panic!("Expected error persisting manager"),
1221                         Err(e) => {
1222                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1223                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1224                         },
1225                 }
1226         }
1227
1228         #[tokio::test]
1229         #[cfg(feature = "futures")]
1230         async fn test_channel_manager_persist_error_async() {
1231		// Test that if we encounter an error during manager persistence, the error is returned by
1231		// the background processing future.
1232                 let nodes = create_nodes(2, "test_persist_error_sync".to_string());
1233                 open_channel!(nodes[0], nodes[1], 100000);
1234
1235                 let data_dir = nodes[0].persister.get_data_dir();
1236                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1237
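		// `process_events_async` takes a "sleeper" closure instead of spawning its own thread: the
		// closure returns a future that resolves to `true` when the background processor should
		// exit, or `false` to keep running after the requested sleep.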
1238                 let bp_future = super::process_events_async(
1239                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1240                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1241                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1242                                 Box::pin(async move {
1243                                         tokio::time::sleep(dur).await;
1244                                         false // Never exit
1245                                 })
1246                         }, false,
1247                 );
1248                 match bp_future.await {
1249                         Ok(_) => panic!("Expected error persisting manager"),
1250                         Err(e) => {
1251                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1252                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1253                         },
1254                 }
1255         }
1256
1257         #[test]
1258         fn test_network_graph_persist_error() {
1259                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1260                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1261                 let data_dir = nodes[0].persister.get_data_dir();
1262                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1263                 let event_handler = |_: _| {};
1264                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1265
1266                 match bg_processor.stop() {
1267                         Ok(_) => panic!("Expected error persisting network graph"),
1268                         Err(e) => {
1269                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1270                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1271                         },
1272                 }
1273         }
1274
1275         #[test]
1276         fn test_scorer_persist_error() {
1277                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1278                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1279                 let data_dir = nodes[0].persister.get_data_dir();
1280                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1281                 let event_handler = |_: _| {};
1282                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1283
1284                 match bg_processor.stop() {
1285                         Ok(_) => panic!("Expected error persisting scorer"),
1286                         Err(e) => {
1287                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1288                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1289                         },
1290                 }
1291         }
1292
1293         #[test]
1294         fn test_background_event_handling() {
1295                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1296                 let channel_value = 100000;
1297                 let data_dir = nodes[0].persister.get_data_dir();
1298                 let persister = Arc::new(Persister::new(data_dir.clone()));
1299
1300		// Set up a background event handler for FundingGenerationReady and ChannelPending events
1300		// (ChannelReady events are ignored).
1301                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1302                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1303                 let event_handler = move |event: Event| match event {
1304                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1305                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1306                         Event::ChannelReady { .. } => {},
1307                         _ => panic!("Unexpected event: {:?}", event),
1308                 };
1309
1310                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1311
1312                 // Open a channel and check that the FundingGenerationReady event was handled.
1313                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1314                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1315                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1316                         .expect("FundingGenerationReady not handled within deadline");
1317                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1318                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1319                 get_event!(nodes[1], Event::ChannelPending);
1320                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1321                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1322                         .expect("ChannelPending not handled within deadline");
1323
1324                 // Confirm the funding transaction.
1325                 confirm_transaction(&mut nodes[0], &funding_tx);
1326                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1327                 confirm_transaction(&mut nodes[1], &funding_tx);
1328                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1329                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1330                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1331                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1332                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1333
1334                 if !std::thread::panicking() {
1335                         bg_processor.stop().unwrap();
1336                 }
1337
1338                 // Set up a background event handler for SpendableOutputs events.
1339                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1340                 let event_handler = move |event: Event| match event {
1341                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1342                         Event::ChannelReady { .. } => {},
1343                         Event::ChannelClosed { .. } => {},
1344                         _ => panic!("Unexpected event: {:?}", event),
1345                 };
1346                 let persister = Arc::new(Persister::new(data_dir));
1347                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1348
1349                 // Force close the channel and check that the SpendableOutputs event was handled.
1350                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1351                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
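		// Confirm the commitment transaction to a depth of BREAKDOWN_TIMEOUT blocks so that the
		// delayed claimable output matures, which is what produces the SpendableOutputs event
		// checked below.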
1352                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1353
1354                 let event = receiver
1355                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1356                         .expect("Events not handled within deadline");
1357                 match event {
1358                         Event::SpendableOutputs { .. } => {},
1359                         _ => panic!("Unexpected event: {:?}", event),
1360                 }
1361
1362                 if !std::thread::panicking() {
1363                         bg_processor.stop().unwrap();
1364                 }
1365         }
1366
1367         #[test]
1368         fn test_scorer_persistence() {
1369                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1370                 let data_dir = nodes[0].persister.get_data_dir();
1371                 let persister = Arc::new(Persister::new(data_dir));
1372                 let event_handler = |_: _| {};
1373                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1374
1375                 loop {
1376                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1377                         let expected_log = "Persisting scorer".to_string();
1378                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1379                                 break
1380                         }
1381                 }
1382
1383                 if !std::thread::panicking() {
1384                         bg_processor.stop().unwrap();
1385                 }
1386         }
1387
1388         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1389                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1390                         let features = ChannelFeatures::empty();
1391                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1392                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1393                         ).expect("Failed to update channel from partial announcement");
1394                         let original_graph_description = $nodes[0].network_graph.to_string();
1395                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1396                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1397
1398                         loop {
1399                                 $sleep;
1400                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1401                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1402                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1403                                         .unwrap_or(&0) > 1
1404                                 {
1405                                         // Wait until the loop has gone around at least twice.
1406                                         break
1407                                 }
1408                         }
1409
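			// Apply a pre-encoded rapid gossip sync snapshot (announcing two channels) so that the
			// graph is considered freshly synced; only then should the pruning logic be willing to
			// remove stale channels.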
1410                         let initialization_input = vec![
1411                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1412                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1413                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1414                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1415                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1416                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1417                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1418                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1419                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1420                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1421                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1422                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1423                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1424                         ];
1425                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1426
1427                         // this should have added two channels and pruned the previous one.
1428                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1429
1430                         $receive.expect("Network graph not pruned within deadline");
1431
1432                         // all channels should now be pruned
1433                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1434                 }
1435         }
1436
1437         #[test]
1438         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1439                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1440
1441                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1442                 let data_dir = nodes[0].persister.get_data_dir();
1443                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1444
1445                 let event_handler = |_: _| {};
1446                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1447
1448                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1449                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1450                         std::thread::sleep(Duration::from_millis(1)));
1451
1452                 background_processor.stop().unwrap();
1453         }
1454
1455         #[tokio::test]
1456         #[cfg(feature = "futures")]
1457         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1458                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1459
1460                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
1461                 let data_dir = nodes[0].persister.get_data_dir();
1462                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1463
1464                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1465                 let bp_future = super::process_events_async(
1466                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1467                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1468                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1469                                 let mut exit_receiver = exit_receiver.clone();
1470                                 Box::pin(async move {
1471                                         tokio::select! {
1472                                                 _ = tokio::time::sleep(dur) => false,
1473                                                 _ = exit_receiver.changed() => true,
1474                                         }
1475                                 })
1476                         }, false,
1477                 );
1478		// TODO: Drop `local_set` and simply spawn after #2003
1479                 let local_set = tokio::task::LocalSet::new();
1480                 local_set.spawn_local(bp_future);
1481                 local_set.spawn_local(async move {
1482                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1483                                 let mut i = 0;
1484                                 loop {
1485                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1486                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1487                                         assert!(i < 5);
1488                                         i += 1;
1489                                 }
1490                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1491                         exit_sender.send(()).unwrap();
1492                 });
1493                 local_set.await;
1494         }
1495
1496         macro_rules! do_test_payment_path_scoring {
1497                 ($nodes: expr, $receive: expr) => {
1498			// Ensure that we update the scorer when relevant events are processed. In this case, we
1499			// check that the scorer is updated for payment path failures and successes as well as for
1500			// probe results (note that the channel must be public or else we won't score it). The
1501			// caller hooks its event handler up to a running background processor and passes the
1502			// events it receives back in via `$receive`.
1503                         let scored_scid = 4242;
1504                         let secp_ctx = Secp256k1::new();
1505                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1506                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1507
1508                         let path = Path { hops: vec![RouteHop {
1509                                 pubkey: node_1_id,
1510                                 node_features: NodeFeatures::empty(),
1511                                 short_channel_id: scored_scid,
1512                                 channel_features: ChannelFeatures::empty(),
1513                                 fee_msat: 0,
1514                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1515                         }], blinded_tail: None };
1516
1517                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1518                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1519                                 payment_id: None,
1520                                 payment_hash: PaymentHash([42; 32]),
1521                                 payment_failed_permanently: false,
1522                                 failure: PathFailure::OnPath { network_update: None },
1523                                 path: path.clone(),
1524                                 short_channel_id: Some(scored_scid),
1525                         });
1526                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1527                         match event {
1528                                 Event::PaymentPathFailed { .. } => {},
1529                                 _ => panic!("Unexpected event"),
1530                         }
1531
1532			// Ensure we'll score payments that were explicitly failed back by the destination as
1533			// ProbeSuccess, since every hop on the path forwarded the HTLC to the recipient.
1534                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1535                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1536                                 payment_id: None,
1537                                 payment_hash: PaymentHash([42; 32]),
1538                                 payment_failed_permanently: true,
1539                                 failure: PathFailure::OnPath { network_update: None },
1540                                 path: path.clone(),
1541                                 short_channel_id: None,
1542                         });
1543                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1544                         match event {
1545                                 Event::PaymentPathFailed { .. } => {},
1546                                 _ => panic!("Unexpected event"),
1547                         }
1548
1549                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1550                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1551                                 payment_id: PaymentId([42; 32]),
1552                                 payment_hash: None,
1553                                 path: path.clone(),
1554                         });
1555                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1556                         match event {
1557                                 Event::PaymentPathSuccessful { .. } => {},
1558                                 _ => panic!("Unexpected event"),
1559                         }
1560
1561                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1562                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1563                                 payment_id: PaymentId([42; 32]),
1564                                 payment_hash: PaymentHash([42; 32]),
1565                                 path: path.clone(),
1566                         });
1567                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1568                         match event {
1569				Event::ProbeSuccessful { .. } => {},
1570                                 _ => panic!("Unexpected event"),
1571                         }
1572
1573                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1574                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1575                                 payment_id: PaymentId([42; 32]),
1576                                 payment_hash: PaymentHash([42; 32]),
1577                                 path,
1578                                 short_channel_id: Some(scored_scid),
1579                         });
1580                         let event = $receive.expect("ProbeFailure not handled within deadline");
1581                         match event {
1582                                 Event::ProbeFailed { .. } => {},
1583                                 _ => panic!("Unexpected event"),
1584                         }
1585                 }
1586         }
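
	// The expectations above pin down how payment and probe events are expected to be translated
	// into `Score` updates. The function below is a minimal sketch of that mapping, for reference
	// only; it is not the crate's actual implementation, is unused by these tests, and assumes
	// just the `Score` trait methods exercised by `TestScorer`.
	#[allow(dead_code)]
	fn update_scorer_sketch(scorer: &Mutex<TestScorer>, event: &Event) {
		use lightning::routing::scoring::Score;
		let mut scorer = scorer.lock().unwrap();
		match event {
			// A failure pinned to a specific channel penalizes that channel.
			Event::PaymentPathFailed { path, short_channel_id: Some(scid), .. } =>
				scorer.payment_path_failed(path, *scid),
			// A payment the destination explicitly failed back still traversed the whole path, so
			// it is scored like a successful probe.
			Event::PaymentPathFailed { path, payment_failed_permanently: true, .. } =>
				scorer.probe_successful(path),
			Event::PaymentPathSuccessful { path, .. } => scorer.payment_path_successful(path),
			Event::ProbeSuccessful { path, .. } => scorer.probe_successful(path),
			Event::ProbeFailed { path, short_channel_id: Some(scid), .. } =>
				scorer.probe_failed(path, *scid),
			_ => {},
		}
	}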
1587
1588         #[test]
1589         fn test_payment_path_scoring() {
1590                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1591                 let event_handler = move |event: Event| match event {
1592                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1593                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1594                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1595                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1596                         _ => panic!("Unexpected event: {:?}", event),
1597                 };
1598
1599                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1600                 let data_dir = nodes[0].persister.get_data_dir();
1601                 let persister = Arc::new(Persister::new(data_dir));
1602                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1603
1604                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1605
1606                 if !std::thread::panicking() {
1607                         bg_processor.stop().unwrap();
1608                 }
1609         }
1610
1611         #[tokio::test]
1612         #[cfg(feature = "futures")]
1613         async fn test_payment_path_scoring_async() {
1614                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1615                 let event_handler = move |event: Event| {
1616                         let sender_ref = sender.clone();
1617                         async move {
1618                                 match event {
1619                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1620                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1621                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1622                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1623                                         _ => panic!("Unexpected event: {:?}", event),
1624                                 }
1625                         }
1626                 };
1627
1628                 let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
1629                 let data_dir = nodes[0].persister.get_data_dir();
1630                 let persister = Arc::new(Persister::new(data_dir));
1631
1632                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1633
1634                 let bp_future = super::process_events_async(
1635                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1636                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1637                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1638                                 let mut exit_receiver = exit_receiver.clone();
1639                                 Box::pin(async move {
1640                                         tokio::select! {
1641                                                 _ = tokio::time::sleep(dur) => false,
1642                                                 _ = exit_receiver.changed() => true,
1643                                         }
1644                                 })
1645                         }, false,
1646                 );
1647		// TODO: Drop `local_set` and simply spawn after #2003
1648                 let local_set = tokio::task::LocalSet::new();
1649                 local_set.spawn_local(bp_future);
1650                 local_set.spawn_local(async move {
1651                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1652                         exit_sender.send(()).unwrap();
1653                 });
1654                 local_set.await;
1655         }
1656 }