// Include `maybe_announced` field in `RouteHop`
// rust-lightning / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
/// upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	// Flag shared with the spawned thread; setting it to `true` requests loop termination.
	stop_thread: Arc<AtomicBool>,
	// Handle to the spawned background thread, carrying the loop's final persistence result.
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
91
// Timer intervals, in seconds. Test builds use aggressively short intervals so integration
// tests can observe multiple ticks without long sleeps.

#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;

#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
const fn min_u64(a: u64, b: u64) -> u64 { if a > b { b } else { a } }
/// The shortest of all the intervals above, used to size the async sleep granularity.
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(FRESHNESS_TIMER,
	min_u64(PING_TIMER, min_u64(SCORER_PERSIST_TIMER,
		min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER))));
131
132 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
133 pub enum GossipSync<
134         P: Deref<Target = P2PGossipSync<G, U, L>>,
135         R: Deref<Target = RapidGossipSync<G, L>>,
136         G: Deref<Target = NetworkGraph<L>>,
137         U: Deref,
138         L: Deref,
139 >
140 where U::Target: UtxoLookup, L::Target: Logger {
141         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
142         P2P(P),
143         /// Rapid gossip sync from a trusted server.
144         Rapid(R),
145         /// No gossip sync.
146         None,
147 }
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
180 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
181 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
182         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
183 where
184         U::Target: UtxoLookup,
185         L::Target: Logger,
186 {
187         /// Initializes a new [`GossipSync::P2P`] variant.
188         pub fn p2p(gossip_sync: P) -> Self {
189                 GossipSync::P2P(gossip_sync)
190         }
191 }
192
193 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
194 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
195         GossipSync<
196                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
197                 R,
198                 G,
199                 &'a (dyn UtxoLookup + Send + Sync),
200                 L,
201         >
202 where
203         L::Target: Logger,
204 {
205         /// Initializes a new [`GossipSync::Rapid`] variant.
206         pub fn rapid(gossip_sync: R) -> Self {
207                 GossipSync::Rapid(gossip_sync)
208         }
209 }
210
211 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
212 impl<'a, L: Deref>
213         GossipSync<
214                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
215                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
216                 &'a NetworkGraph<L>,
217                 &'a (dyn UtxoLookup + Send + Sync),
218                 L,
219         >
220 where
221         L::Target: Logger,
222 {
223         /// Initializes a new [`GossipSync::None`] variant.
224         pub fn none() -> Self {
225                 GossipSync::None
226         }
227 }
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
239 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
240 /// to persist.
241 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
242         scorer: &'a S, event: &Event
243 ) -> bool {
244         match event {
245                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
246                         let mut score = scorer.write_lock();
247                         score.payment_path_failed(path, *scid);
248                 },
249                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
250                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
251                         // because the payment made it all the way to the destination with sufficient liquidity.
252                         let mut score = scorer.write_lock();
253                         score.probe_successful(path);
254                 },
255                 Event::PaymentPathSuccessful { path, .. } => {
256                         let mut score = scorer.write_lock();
257                         score.payment_path_successful(path);
258                 },
259                 Event::ProbeSuccessful { path, .. } => {
260                         let mut score = scorer.write_lock();
261                         score.probe_successful(path);
262                 },
263                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
264                         let mut score = scorer.write_lock();
265                         score.probe_failed(path, *scid);
266                 },
267                 _ => return false,
268         }
269         true
270 }
271
// Expands to the shared body of both background-processing loops (the `std` threaded
// `BackgroundProcessor` and the `futures`-based `process_events_async`). Parameters:
//  * `$persister`/`$chain_monitor`/`$channel_manager`/`$gossip_sync`/`$peer_manager`/`$logger`/
//    `$scorer`: handles to the objects the loop drives.
//  * `$process_chain_monitor_events`/`$process_channel_manager_events`: expressions which drain
//    pending events from the respective sources.
//  * `$loop_exit_check`: expression re-evaluated each iteration to decide whether to stop.
//  * `$await`: expression that sleeps until there is work or a timeout, evaluating to whether
//    the `ChannelManager` needs re-persisting.
//  * `$get_timer`/`$timer_elapsed`: abstraction over timekeeping (std `Instant` vs. a simple
//    counter in the async/no-std case).
//  * `$check_slow_await`: whether to detect the process having been suspended mid-sleep.
// The expansion is a block evaluating to `Ok(())`, with persistence failures propagated via `?`.
macro_rules! define_run_body {
	($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
	 $channel_manager: ident, $process_channel_manager_events: expr,
	 $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
	 $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
	 $check_slow_await: expr)
	=> { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		// One timer per periodic task; each is reset independently when its task runs.
		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persistence.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			let updates_available = $await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if updates_available {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					// Without `std` we have no clock, so we cannot age-out stale entries here;
					// the user is expected to prune manually (see the log_warn below).
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				// Reset the timer whether or not pruning was possible, so we don't re-check on
				// every iteration; the next interval depends on whether we've pruned yet.
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
427
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	/// A future which resolves as soon as any one of its three sub-futures (`a`, `b`, or `c`)
	/// completes, reporting which one finished via [`SelectorOutput`].
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	/// Which of [`Selector`]'s sub-futures finished first. `C` carries the `bool` output of the
	/// `c` future.
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			// Polling order is fixed (a, then b, then c): if more than one sub-future is ready
			// on the same poll, the earlier one wins and the others' readiness is ignored.
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	/// Returns a no-op [`Waker`], allowing a future to be polled outside an async context.
	// SAFETY: the RawWaker contract is upheld because none of the vtable functions above ever
	// dereference the data pointer — `clone` returns a fresh null-pointer RawWaker and
	// wake/wake_by_ref/drop are all no-ops — so a null data pointer is sound.
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
476 #[cfg(feature = "futures")]
477 use futures_util::{Selector, SelectorOutput, dummy_waker};
478 #[cfg(feature = "futures")]
479 use core::task;
480
481 /// Processes background events in a future.
482 ///
483 /// `sleeper` should return a future which completes in the given amount of time and returns a
484 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
485 /// future which outputs `true`, the loop will exit and this function's future will complete.
486 /// The `sleeper` future is free to return early after it has triggered the exit condition.
487 ///
488 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
489 ///
490 /// Requires the `futures` feature. Note that while this method is available without the `std`
491 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
492 /// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
493 /// manually instead.
494 ///
495 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
496 /// mobile device, where we may need to check for interruption of the application regularly. If you
497 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
498 /// are hundreds or thousands of simultaneous process calls running.
499 ///
500 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
501 /// could setup `process_events_async` like this:
502 /// ```
503 /// # use lightning::io;
504 /// # use std::sync::{Arc, Mutex};
505 /// # use std::sync::atomic::{AtomicBool, Ordering};
506 /// # use lightning_background_processor::{process_events_async, GossipSync};
507 /// # struct MyStore {}
508 /// # impl lightning::util::persist::KVStore for MyStore {
509 /// #     fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
510 /// #     fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
511 /// #     fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
512 /// #     fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
513 /// # }
514 /// # struct MyEventHandler {}
515 /// # impl MyEventHandler {
516 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
517 /// # }
518 /// # #[derive(Eq, PartialEq, Clone, Hash)]
519 /// # struct MySocketDescriptor {}
520 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
521 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
522 /// #     fn disconnect_socket(&mut self) {}
523 /// # }
524 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
525 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
526 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
527 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
528 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
529 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
530 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
531 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
532 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
533 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
534 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
535 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
536 ///
537 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
538 ///     let background_persister = Arc::clone(&my_persister);
539 ///     let background_event_handler = Arc::clone(&my_event_handler);
540 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
541 ///     let background_chan_man = Arc::clone(&my_channel_manager);
542 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
543 ///     let background_peer_man = Arc::clone(&my_peer_manager);
544 ///     let background_logger = Arc::clone(&my_logger);
545 ///     let background_scorer = Arc::clone(&my_scorer);
546 ///
547 ///     // Setup the sleeper.
548 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
549 ///
550 ///     let sleeper = move |d| {
551 ///             let mut receiver = stop_receiver.clone();
552 ///             Box::pin(async move {
553 ///                     tokio::select!{
554 ///                             _ = tokio::time::sleep(d) => false,
555 ///                             _ = receiver.changed() => true,
556 ///                     }
557 ///             })
558 ///     };
559 ///
560 ///     let mobile_interruptable_platform = false;
561 ///
562 ///     let handle = tokio::spawn(async move {
563 ///             process_events_async(
564 ///                     background_persister,
565 ///                     |e| background_event_handler.handle_event(e),
566 ///                     background_chain_mon,
567 ///                     background_chan_man,
568 ///                     background_gossip_sync,
569 ///                     background_peer_man,
570 ///                     background_logger,
571 ///                     Some(background_scorer),
572 ///                     sleeper,
573 ///                     mobile_interruptable_platform,
574 ///                     )
575 ///                     .await
576 ///                     .expect("Failed to process events");
577 ///     });
578 ///
579 ///     // Stop the background processing.
580 ///     stop_sender.send(()).unwrap();
581 ///     handle.await.unwrap();
582 ///     # }
583 ///```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	APM: APeerManager + Send + Sync,
	PM: 'static + Deref<Target = APM> + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
	// Set to `true` once the caller-provided `sleeper` future resolves with `true`, i.e. once the
	// caller has asked background processing to terminate.
	let mut should_break = false;
	// Wrap the user's event handler so that, before each event is forwarded, we (a) apply any
	// network-graph update the event carries and (b) feed payment/probe results into the scorer,
	// persisting the scorer whenever `update_scorer` reports it changed.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		// Capture by reference so the returned future borrows from this closure's environment.
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	// `define_run_body!` (defined earlier in this file) expands to the main processing loop; the
	// block below supplies its "wait for the next wakeup" step.
	define_run_body!(persister,
		chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			// Race three wakeup sources: a channel manager persistence request, a chain monitor
			// update, and the caller's sleeper (which may also signal an exit request).
			let fut = Selector {
				a: channel_manager.get_persistable_update_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A => true,
				SelectorOutput::B => false,
				SelectorOutput::C(exit) => {
					should_break = exit;
					false
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			// Poll the sleep future exactly once with a no-op waker; `Pending` just means "keep
			// looping", while `Ready(true)` records the caller's exit request.
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform)
}
680
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		APM: APeerManager + Send + Sync,
		PM: 'static + Deref<Target = APM> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	{
		// Shared shutdown flag: set by `stop`/`drop` and read by the background thread on
		// each loop iteration.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user's event handler: apply any network-graph update the event
			// carries and feed payment/probe results into the scorer (persisting the scorer
			// when it changed) before forwarding the event.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			// The main processing loop: between iterations we wait (at most 100ms) on the
			// channel manager / chain monitor notifiers so the stop flag is checked promptly.
			define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				Sleeper::from_two_futures(
					channel_manager.get_persistable_update_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)),
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	/// Signals the background thread to stop, then waits for it to exit.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	/// Waits for the background thread (if still running) and returns its result. Returns
	/// `Ok(())` when the thread was already joined. Panics if the thread itself panicked.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
839
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Stop and join the background thread, surfacing any persistence error as a panic.
		// NOTE(review): this `unwrap` will double-panic (and abort) if the processor is dropped
		// while already unwinding — confirm callers never drop it on a panic path.
		self.stop_and_join_thread().unwrap();
	}
}
846
847 #[cfg(all(feature = "std", test))]
848 mod tests {
849         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
850         use bitcoin::blockdata::locktime::PackedLockTime;
851         use bitcoin::blockdata::transaction::{Transaction, TxOut};
852         use bitcoin::network::constants::Network;
853         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
854         use lightning::chain::{BestBlock, Confirm, chainmonitor};
855         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
856         use lightning::sign::{InMemorySigner, KeysManager};
857         use lightning::chain::transaction::OutPoint;
858         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
859         use lightning::{get_event_msg, get_event};
860         use lightning::ln::PaymentHash;
861         use lightning::ln::channelmanager;
862         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
863         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
864         use lightning::ln::functional_test_utils::*;
865         use lightning::ln::msgs::{ChannelMessageHandler, Init};
866         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
867         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
868         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
869         use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
870         use lightning::util::config::UserConfig;
871         use lightning::util::ser::Writeable;
872         use lightning::util::test_utils;
873         use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
874         use lightning_persister::fs_store::FilesystemStore;
875         use std::collections::VecDeque;
876         use std::{fs, env};
877         use std::path::PathBuf;
878         use std::sync::{Arc, Mutex};
879         use std::sync::mpsc::SyncSender;
880         use std::time::Duration;
881         use lightning_rapid_gossip_sync::RapidGossipSync;
882         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
883
	// How long the tests wait for an expected event before giving up, expressed as a small
	// multiple of the processor's freshness timer (units match `FRESHNESS_TIMER`).
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
885
	// A no-op socket descriptor used only to satisfy the `PeerManager` type parameters in tests;
	// it never carries any data (see its `SocketDescriptor` impl below... it reports 0 bytes sent).
	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
888         impl SocketDescriptor for TestDescriptor {
889                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
890                         0
891                 }
892
893                 fn disconnect_socket(&mut self) {}
894         }
895
	// Concrete `ChannelManager` used throughout the tests: all signing roles are served by a
	// single `KeysManager`, and broadcasting/fee estimation/logging/routing use test utilities.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<Mutex<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;
912
	// Concrete `ChainMonitor` for the tests, persisting channel monitors via `FilesystemStore`.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
914
	// P2P gossip sync over the shared test network graph.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	// Rapid gossip sync over the shared test network graph.
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
917
	// One test node: bundles every component the background processor under test needs, plus
	// handles the tests themselves poke at (broadcaster, network graph, logger, scorer).
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		kv_store: Arc<FilesystemStore>, // on-disk store; its directory is removed on `Drop`
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<Mutex<TestScorer>>,
	}
931
932         impl Node {
933                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
934                         GossipSync::P2P(self.p2p_gossip_sync.clone())
935                 }
936
937                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
938                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
939                 }
940
941                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
942                         GossipSync::None
943                 }
944         }
945
946         impl Drop for Node {
947                 fn drop(&mut self) {
948                         let data_dir = self.kv_store.get_data_dir();
949                         match fs::remove_dir_all(data_dir.clone()) {
950                                 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
951                                 _ => {}
952                         }
953                 }
954         }
955
	// Test `KVStore` that delegates to a real `FilesystemStore` but can be configured to fail
	// writes of specific well-known objects, and/or to notify a channel whenever a network-graph
	// write is attempted (see the `KVStore` impl below).
	struct Persister {
		// When set, network-graph writes fail with this error.
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		// When set, receives `()` on every network-graph write attempt (even failing ones).
		graph_persistence_notifier: Option<SyncSender<()>>,
		// When set, channel-manager writes fail with this error.
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		// When set, scorer writes fail with this error.
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}
963
964         impl Persister {
965                 fn new(data_dir: PathBuf) -> Self {
966                         let kv_store = FilesystemStore::new(data_dir);
967                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
968                 }
969
970                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
971                         Self { graph_error: Some((error, message)), ..self }
972                 }
973
974                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
975                         Self { graph_persistence_notifier: Some(sender), ..self }
976                 }
977
978                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
979                         Self { manager_error: Some((error, message)), ..self }
980                 }
981
982                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
983                         Self { scorer_error: Some((error, message)), ..self }
984                 }
985         }
986
987         impl KVStore for Persister {
988                 fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
989                         self.kv_store.read(namespace, sub_namespace, key)
990                 }
991
992                 fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
993                         if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
994                                 sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
995                                 key == CHANNEL_MANAGER_PERSISTENCE_KEY
996                         {
997                                 if let Some((error, message)) = self.manager_error {
998                                         return Err(std::io::Error::new(error, message))
999                                 }
1000                         }
1001
1002                         if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
1003                                 sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
1004                                 key == NETWORK_GRAPH_PERSISTENCE_KEY
1005                         {
1006                                 if let Some(sender) = &self.graph_persistence_notifier {
1007                                         match sender.send(()) {
1008                                                 Ok(()) => {},
1009                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1010                                         }
1011                                 };
1012
1013                                 if let Some((error, message)) = self.graph_error {
1014                                         return Err(std::io::Error::new(error, message))
1015                                 }
1016                         }
1017
1018                         if namespace == SCORER_PERSISTENCE_NAMESPACE &&
1019                                 sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
1020                                 key == SCORER_PERSISTENCE_KEY
1021                         {
1022                                 if let Some((error, message)) = self.scorer_error {
1023                                         return Err(std::io::Error::new(error, message))
1024                                 }
1025                         }
1026
1027                         self.kv_store.write(namespace, sub_namespace, key, buf)
1028                 }
1029
1030                 fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1031                         self.kv_store.remove(namespace, sub_namespace, key, lazy)
1032                 }
1033
1034                 fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
1035                         self.kv_store.list(namespace, sub_namespace)
1036                 }
1037         }
1038
	// A scorer that verifies the background processor forwards the expected payment/probe results
	// to it, in order. `None` means "no expectations registered" and all callbacks are no-ops.
	struct TestScorer {
		event_expectations: Option<VecDeque<TestResult>>,
	}
1042
	// One expected scorer callback: which `ScoreUpdate` method should fire next and with what
	// arguments.
	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}
1050
1051         impl TestScorer {
1052                 fn new() -> Self {
1053                         Self { event_expectations: None }
1054                 }
1055
1056                 fn expect(&mut self, expectation: TestResult) {
1057                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1058                 }
1059         }
1060
	impl lightning::util::ser::Writeable for TestScorer {
		// Serializes as the empty byte string; the tests only observe *that* scorer persistence
		// is attempted, never its contents.
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}
1064
	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		// `unimplemented!`: the tests are not expected to exercise penalty lookups, so any call
		// here panics and flags a test bug.
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
1071
	impl ScoreUpdate for TestScorer {
		// Each method below pops the next expectation (if any were registered): it asserts the
		// arguments when the expectation matches this callback, and panics when a different
		// callback was expected. With no expectations registered, all callbacks are no-ops.
		fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn payment_path_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn probe_failed(&mut self, actual_path: &Path, _: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}
		fn probe_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						assert_eq!(actual_path, &path);
					}
				}
			}
		}
	}
1149
1150         impl Drop for TestScorer {
1151                 fn drop(&mut self) {
1152                         if std::thread::panicking() {
1153                                 return;
1154                         }
1155
1156                         if let Some(event_expectations) = &self.event_expectations {
1157                                 if !event_expectations.is_empty() {
1158                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1159                                 }
1160                         }
1161                 }
1162         }
1163
1164         fn get_full_filepath(filepath: String, filename: String) -> String {
1165                 let mut path = PathBuf::from(filepath);
1166                 path.push(filename);
1167                 path.to_str().unwrap().to_string()
1168         }
1169
	// Builds `num_nodes` fully-wired test nodes (channel manager, chain monitor, peer
	// manager, P2P and rapid gossip syncs, filesystem KV store, scorer), each persisting
	// under "<tmp>/<persist_dir>_persister_<i>", then marks every pair of nodes as
	// connected. Returns the persistence directory prefix alongside the nodes.
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(Mutex::new(TestScorer::new()));
			// Deterministic per-node seed (all bytes = node index) so runs are reproducible.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			// Each node gets its own persistence subdirectory so tests don't collide.
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			// Use the genesis block timestamp as "now" for key derivation.
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler {
				// NOTE(review): this handler is keyed to Testnet's genesis while everything
				// else here uses Network::Bitcoin — confirm the mismatch is intentional.
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Tell each pair of channel managers about each other. The boolean flag differs
		// between the two calls — presumably it marks which side initiated the
		// connection; TODO confirm against `peer_connected`'s signature.
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
1217
	// Opens a channel from $node_a to $node_b for $channel_value satoshis, driving the
	// funding handshake until both sides have surfaced `Event::ChannelPending`.
	// Evaluates to the funding transaction so callers can confirm it on-chain later.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			// open_channel / accept_channel negotiation.
			begin_open_channel!($node_a, $node_b, $channel_value);
			// The initiator should now have exactly one pending event asking for funding.
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			// funding_created / funding_signed exchange; each side emits ChannelPending.
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}
1232
	// Kicks off a channel open from $node_a to $node_b: create_channel on the initiator,
	// then relays the resulting open_channel / accept_channel messages between the two
	// peers. Stops before funding, so a FundingGenerationReady event is left pending on
	// $node_a (consumed by open_channel! or a background event handler).
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			// push_msat = 100, user_channel_id = 42 (the latter is asserted by
			// handle_funding_generation_ready!).
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
1240
	// Destructures a FundingGenerationReady event, asserting the channel value and the
	// user_channel_id (42, as set by begin_open_channel!), and builds a dummy funding
	// transaction paying the requested script. Evaluates to (temporary_channel_id, tx);
	// panics on any other event kind.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					// Dummy funding tx: no inputs, a single output of the full channel
					// value locked to the script the channel manager asked for.
					let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
1257
1258         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1259                 for i in 1..=depth {
1260                         let prev_blockhash = node.best_block.block_hash();
1261                         let height = node.best_block.height() + 1;
1262                         let header = create_dummy_header(prev_blockhash, height);
1263                         let txdata = vec![(0, tx)];
1264                         node.best_block = BestBlock::new(header.block_hash(), height);
1265                         match i {
1266                                 1 => {
1267                                         node.node.transactions_confirmed(&header, &txdata, height);
1268                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1269                                 },
1270                                 x if x == depth => {
1271                                         node.node.best_block_updated(&header, height);
1272                                         node.chain_monitor.best_block_updated(&header, height);
1273                                 },
1274                                 _ => {},
1275                         }
1276                 }
1277         }
	// Confirms `tx` and mines further blocks until it is ANTI_REORG_DELAY deep.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
1281
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Busy-waits until the on-disk bytes at $filepath match the current serialization
		// of $node. Serialization errors fail the test immediately; read failures (e.g.
		// the file not having been written yet) or stale contents just retry.
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		// Spin until the manager's persistence condvar value clears — presumably meaning
		// the background processor has caught up with pending persistence work.
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		// Avoid a double panic if an assertion above already failed.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1353
1354         #[test]
1355         fn test_timer_tick_called() {
1356                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1357                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1358                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1359                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1360                 let data_dir = nodes[0].kv_store.get_data_dir();
1361                 let persister = Arc::new(Persister::new(data_dir));
1362                 let event_handler = |_: _| {};
1363                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1364                 loop {
1365                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1366                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1367                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1368                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1369                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1370                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1371                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1372                                 break
1373                         }
1374                 }
1375
1376                 if !std::thread::panicking() {
1377                         bg_processor.stop().unwrap();
1378                 }
1379         }
1380
1381         #[test]
1382         fn test_channel_manager_persist_error() {
1383                 // Test that if we encounter an error during manager persistence, the thread panics.
1384                 let (_, nodes) = create_nodes(2, "test_persist_error");
1385                 open_channel!(nodes[0], nodes[1], 100000);
1386
1387                 let data_dir = nodes[0].kv_store.get_data_dir();
1388                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1389                 let event_handler = |_: _| {};
1390                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1391                 match bg_processor.join() {
1392                         Ok(_) => panic!("Expected error persisting manager"),
1393                         Err(e) => {
1394                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1395                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1396                         },
1397                 }
1398         }
1399
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_channel_manager_persist_error_async() {
		// Test that if we encounter an error during manager persistence, the error is
		// returned out of the background processing future (async counterpart of
		// test_channel_manager_persist_error).
		let (_, nodes) = create_nodes(2, "test_persist_error_sync");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		// Inject a persistence failure for the channel manager specifically.
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

		// The final closure is the sleeper; it resolves to `false` to signal "keep
		// running", so only the injected error can terminate the future.
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				Box::pin(async move {
					tokio::time::sleep(dur).await;
					false // Never exit
				})
			}, false,
		);
		// The injected manager-persistence error should end the future with that error.
		match bp_future.await {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
1428
1429         #[test]
1430         fn test_network_graph_persist_error() {
1431                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1432                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1433                 let data_dir = nodes[0].kv_store.get_data_dir();
1434                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1435                 let event_handler = |_: _| {};
1436                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1437
1438                 match bg_processor.stop() {
1439                         Ok(_) => panic!("Expected error persisting network graph"),
1440                         Err(e) => {
1441                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1442                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1443                         },
1444                 }
1445         }
1446
1447         #[test]
1448         fn test_scorer_persist_error() {
1449                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1450                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1451                 let data_dir = nodes[0].kv_store.get_data_dir();
1452                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1453                 let event_handler = |_: _| {};
1454                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1455
1456                 match bg_processor.stop() {
1457                         Ok(_) => panic!("Expected error persisting scorer"),
1458                         Err(e) => {
1459                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1460                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1461                         },
1462                 }
1463         }
1464
	#[test]
	fn test_background_event_handling() {
		// Verifies that the BackgroundProcessor's event handler receives channel lifecycle
		// events: first FundingGenerationReady/ChannelPending during open, then
		// SpendableOutputs after a force-close matures.
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));

		// Set up a background event handler for FundingGenerationReady events.
		// Events are forwarded over channels so the test thread can assert on them.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		// Exchange channel_ready messages; the resulting channel_update messages are
		// drained but not otherwise checked.
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Confirm the commitment to BREAKDOWN_TIMEOUT depth so the output becomes spendable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1538
1539         #[test]
1540         fn test_scorer_persistence() {
1541                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1542                 let data_dir = nodes[0].kv_store.get_data_dir();
1543                 let persister = Arc::new(Persister::new(data_dir));
1544                 let event_handler = |_: _| {};
1545                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1546
1547                 loop {
1548                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1549                         let expected_log = "Persisting scorer".to_string();
1550                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1551                                 break
1552                         }
1553                 }
1554
1555                 if !std::thread::panicking() {
1556                         bg_processor.stop().unwrap();
1557                 }
1558         }
1559
	// Shared body for the sync/async variants of the "don't prune the graph until rapid
	// gossip sync completes" test. $nodes are the test nodes, $receive blocks until the
	// graph-persistence notification arrives, and $sleep yields between poll iterations.
	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			// Seed the graph with a partially-announced channel (scid 42) that pruning
			// would normally remove.
			let features = ChannelFeatures::empty();
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

			// Let the background processor run a couple of iterations (tracked via the
			// timer_tick_occurred log count) to show it did NOT prune the graph while the
			// rapid gossip sync was still outstanding.
			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
					.unwrap_or(&0) > 1
				{
					// Wait until the loop has gone around at least twice.
					break
				}
			}

			// Canned rapid-gossip-sync snapshot ("LDK" magic header) containing two
			// channel announcements; applying it completes the RGS sync.
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

			// this should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			$receive.expect("Network graph not pruned within deadline");

			// all channels should now be pruned
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
1608
1609         #[test]
1610         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1611                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1612
1613                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1614                 let data_dir = nodes[0].kv_store.get_data_dir();
1615                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1616
1617                 let event_handler = |_: _| {};
1618                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1619
1620                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1621                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1622                         std::thread::sleep(Duration::from_millis(1)));
1623
1624                 background_processor.stop().unwrap();
1625         }
1626
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
		// Async analogue of `test_not_pruning_network_graph_until_graph_sync_completion`: the
		// network graph must not be pruned (and the persister notified) until the rapid gossip
		// sync completes, even if the prune timer fires first.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);

		let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		// Forward graph-persistence notifications into `sender` so the test can observe pruning.
		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					// Custom sleeper: resolves `true` ("should exit") as soon as the watch
					// channel fires, otherwise `false` after the requested duration.
					tokio::select! {
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);

		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
				// Poll for the persistence notification, giving up after five prune intervals.
				let mut i = 0;
				loop {
					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
					assert!(i < 5);
					i += 1;
				}
			}, tokio::time::sleep(Duration::from_millis(1)).await);
			// Signal the background processor future to shut down once the test body is done.
			exit_sender.send(()).unwrap();
		});
		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap()
	}
1668
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			//
			// `$receive` must block (with a deadline) until the background processor's event
			// handler has forwarded the next event. Each scorer expectation is registered
			// *before* the corresponding event is pushed, so the mock scorer can assert the
			// exact update it receives.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

			// A single-hop path over the channel being scored.
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
				maybe_announced_channel: true,
			}], blinded_tail: None };

			// A non-permanent on-path failure should be scored as a payment failure at the
			// failing channel.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful payment should be scored as a payment success along the path.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful probe should be scored as a probe success.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful  { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A failed probe should be scored as a probe failure at the failing channel.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path,
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}
1761
1762         #[test]
1763         fn test_payment_path_scoring() {
1764                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1765                 let event_handler = move |event: Event| match event {
1766                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1767                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1768                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1769                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1770                         _ => panic!("Unexpected event: {:?}", event),
1771                 };
1772
1773                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1774                 let data_dir = nodes[0].kv_store.get_data_dir();
1775                 let persister = Arc::new(Persister::new(data_dir));
1776                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1777
1778                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1779
1780                 if !std::thread::panicking() {
1781                         bg_processor.stop().unwrap();
1782                 }
1783
1784                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1785                 let expected_log = "Persisting scorer after update".to_string();
1786                 assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1787         }
1788
1789         #[tokio::test]
1790         #[cfg(feature = "futures")]
1791         async fn test_payment_path_scoring_async() {
1792                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1793                 let event_handler = move |event: Event| {
1794                         let sender_ref = sender.clone();
1795                         async move {
1796                                 match event {
1797                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1798                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1799                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1800                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1801                                         _ => panic!("Unexpected event: {:?}", event),
1802                                 }
1803                         }
1804                 };
1805
1806                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1807                 let data_dir = nodes[0].kv_store.get_data_dir();
1808                 let persister = Arc::new(Persister::new(data_dir));
1809
1810                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1811
1812                 let bp_future = super::process_events_async(
1813                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1814                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1815                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1816                                 let mut exit_receiver = exit_receiver.clone();
1817                                 Box::pin(async move {
1818                                         tokio::select! {
1819                                                 _ = tokio::time::sleep(dur) => false,
1820                                                 _ = exit_receiver.changed() => true,
1821                                         }
1822                                 })
1823                         }, false,
1824                 );
1825                 let t1 = tokio::spawn(bp_future);
1826                 let t2 = tokio::spawn(async move {
1827                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1828                         exit_sender.send(()).unwrap();
1829
1830                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1831                         let expected_log = "Persisting scorer after update".to_string();
1832                         assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1833                 });
1834
1835                 let (r1, r2) = tokio::join!(t1, t2);
1836                 r1.unwrap().unwrap();
1837                 r2.unwrap()
1838         }
1839 }