Split LockableScore responsibilities between read & write operations
[rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
59 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
60 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
61 /// responsibilities are:
62 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
63 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
64 ///   writing it to disk/backups by invoking the callback given to it at startup.
65 ///   [`ChannelManager`] persistence should be done in the background.
66 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
67 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
68 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
69 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
70 ///
71 /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
72 /// upon as doing so may result in high latency.
73 ///
74 /// # Note
75 ///
76 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
77 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
78 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
79 /// unilateral chain closure fees are at risk.
80 ///
81 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
82 /// [`Event`]: lightning::events::Event
83 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
84 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
        // Shared flag which, when set to `true`, signals the background thread's main
        // loop to exit.
        stop_thread: Arc<AtomicBool>,
        // Handle to the spawned background thread, carrying any persistence I/O error it
        // hit. `Option` presumably so the handle can be `take()`n and joined exactly once
        // on shutdown — confirm against the `stop`/`join` implementations.
        thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
91
// Seconds between calls to `ChannelManager::timer_tick_occurred` in the run loop.
// Test builds use 1-second intervals so timer-driven behavior is observable quickly.
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

// Seconds between calls to `PeerManager::timer_tick_occurred`.
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

// Seconds between persisting the scorer to disk.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

// Delay before the *first* network-graph prune after startup; later prunes run on
// `NETWORK_PRUNE_TIMER`. Ensures short-lived clients still prune at least once.
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

// Seconds between calls to `ChainMonitor::rebroadcast_pending_claims`.
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;

#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
// The shortest of all recurring timer intervals — presumably used to bound the sleep
// between async loop iterations so no timer deadline is overshot; confirm against the
// futures-based processing loop.
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
        min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
131
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
// Type parameters: `P` and `R` deref to the two gossip-sync implementations, `G` to the
// shared [`NetworkGraph`], `U` to a [`UtxoLookup`] and `L` to a [`Logger`].
pub enum GossipSync<
        P: Deref<Target = P2PGossipSync<G, U, L>>,
        R: Deref<Target = RapidGossipSync<G, L>>,
        G: Deref<Target = NetworkGraph<L>>,
        U: Deref,
        L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
        /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
        P2P(P),
        /// Rapid gossip sync from a trusted server.
        Rapid(R),
        /// No gossip sync.
        None,
}
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
        GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
        U::Target: UtxoLookup,
        L::Target: Logger,
{
        /// Initializes a new [`GossipSync::P2P`] variant.
        ///
        /// The unused `Rapid` type parameter is pinned to a reference type here so callers
        /// need not name a concrete [`RapidGossipSync`] type.
        pub fn p2p(gossip_sync: P) -> Self {
                GossipSync::P2P(gossip_sync)
        }
}
192
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
        GossipSync<
                &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
                R,
                G,
                &'a (dyn UtxoLookup + Send + Sync),
                L,
        >
where
        L::Target: Logger,
{
        /// Initializes a new [`GossipSync::Rapid`] variant.
        ///
        /// The unused `P2P` and `UtxoLookup` type parameters are pinned to reference/dyn
        /// types here so callers need not name concrete types for them.
        pub fn rapid(gossip_sync: R) -> Self {
                GossipSync::Rapid(gossip_sync)
        }
}
210
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
        GossipSync<
                &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
                &RapidGossipSync<&'a NetworkGraph<L>, L>,
                &'a NetworkGraph<L>,
                &'a (dyn UtxoLookup + Send + Sync),
                L,
        >
where
        L::Target: Logger,
{
        /// Initializes a new [`GossipSync::None`] variant.
        ///
        /// All type parameters are pinned to reference/dyn types since no gossip sync
        /// object is actually carried.
        pub fn none() -> Self {
                GossipSync::None
        }
}
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
/// Updates scorer based on event and returns whether an update occurred so we can decide whether
/// to persist.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
        scorer: &'a S, event: &Event
) -> bool {
        match event {
                // Note: arm order matters. A failure with a known failing channel id hits this
                // arm even when `payment_failed_permanently` is also set.
                Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
                        let mut score = scorer.write_lock();
                        score.payment_path_failed(path, *scid);
                },
                Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
                        // Reached if the destination explicitly failed it back. We treat this as a successful probe
                        // because the payment made it all the way to the destination with sufficient liquidity.
                        let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::PaymentPathSuccessful { path, .. } => {
                        let mut score = scorer.write_lock();
                        score.payment_path_successful(path);
                },
                Event::ProbeSuccessful { path, .. } => {
                        let mut score = scorer.write_lock();
                        score.probe_successful(path);
                },
                Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
                        let mut score = scorer.write_lock();
                        score.probe_failed(path, *scid);
                },
                // Any other event — including failures/probes without a failing channel id —
                // doesn't affect the scorer, so no persist is needed.
                _ => return false,
        }
        true
}
271
// Shared main-loop body for both the `std`-thread-based and `futures`-based background
// processors. The caller supplies identifiers for the long-lived objects, plus
// expressions abstracting over the sync/async differences:
// * `$loop_exit_check` — evaluates to `true` when the processor should shut down,
// * `$await` — sleeps/waits, evaluating to `true` when the ChannelManager needs persisting,
// * `$get_timer` / `$timer_elapsed` — abstract timer creation and expiry checks,
// * `$check_slow_await` — whether to detect being suspended mid-await (mobile platforms).
macro_rules! define_run_body {
        ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
         $channel_manager: ident, $process_channel_manager_events: expr,
         $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
         $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
         $check_slow_await: expr)
        => { {
                log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
                $channel_manager.timer_tick_occurred();
                log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
                $chain_monitor.rebroadcast_pending_claims();

                // Start all recurring timers; the first graph prune deliberately uses the
                // shorter startup interval.
                let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
                let mut last_ping_call = $get_timer(PING_TIMER);
                let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
                let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
                let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
                let mut have_pruned = false;

                loop {
                        $process_channel_manager_events;
                        $process_chain_monitor_events;

                        // Note that the PeerManager::process_events may block on ChannelManager's locks,
                        // hence it comes last here. When the ChannelManager finishes whatever it's doing,
                        // we want to ensure we get into `persist_manager` as quickly as we can, especially
                        // without running the normal event processing above and handing events to users.
                        //
                        // Specifically, on an *extremely* slow machine, we may see ChannelManager start
                        // processing a message effectively at any point during this loop. In order to
                        // minimize the time between such processing completing and persisting the updated
                        // ChannelManager, we want to minimize methods blocking on a ChannelManager
                        // generally, and as a fallback place such blocking only immediately before
                        // persistence.
                        $peer_manager.as_ref().process_events();

                        // Exit the loop if the background processor was requested to stop.
                        if $loop_exit_check {
                                log_trace!($logger, "Terminating background processor.");
                                break;
                        }

                        // We wait up to 100ms, but track how long it takes to detect being put to sleep,
                        // see `await_start`'s use below.
                        let mut await_start = None;
                        if $check_slow_await { await_start = Some($get_timer(1)); }
                        let updates_available = $await;
                        let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

                        // Exit the loop if the background processor was requested to stop.
                        if $loop_exit_check {
                                log_trace!($logger, "Terminating background processor.");
                                break;
                        }

                        if updates_available {
                                log_trace!($logger, "Persisting ChannelManager...");
                                $persister.persist_manager(&*$channel_manager)?;
                                log_trace!($logger, "Done persisting ChannelManager.");
                        }
                        if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
                                log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
                                $channel_manager.timer_tick_occurred();
                                last_freshness_call = $get_timer(FRESHNESS_TIMER);
                        }
                        if await_slow {
                                // On various platforms, we may be starved of CPU cycles for several reasons.
                                // E.g. on iOS, if we've been in the background, we will be entirely paused.
                                // Similarly, if we're on a desktop platform and the device has been asleep, we
                                // may not get any cycles.
                                // We detect this by checking if our max-100ms-sleep, above, ran longer than a
                                // full second, at which point we assume sockets may have been killed (they
                                // appear to be at least on some platforms, even if it has only been a second).
                                // Note that we have to take care to not get here just because user event
                                // processing was slow at the top of the loop. For example, the sample client
                                // may call Bitcoin Core RPCs during event handling, which very often takes
                                // more than a handful of seconds to complete, and shouldn't disconnect all our
                                // peers.
                                log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
                                $peer_manager.as_ref().disconnect_all_peers();
                                last_ping_call = $get_timer(PING_TIMER);
                        } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
                                log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
                                $peer_manager.as_ref().timer_tick_occurred();
                                last_ping_call = $get_timer(PING_TIMER);
                        }

                        // Note that we want to run a graph prune once not long after startup before
                        // falling back to our usual hourly prunes. This avoids short-lived clients never
                        // pruning their network graph. We run once 60 seconds after startup before
                        // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
                        // we prune after an initial sync completes.
                        let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
                        let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
                        let should_prune = match $gossip_sync {
                                GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
                                _ => prune_timer_elapsed,
                        };
                        if should_prune {
                                // The network graph must not be pruned while rapid sync completion is pending
                                if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
                                        #[cfg(feature = "std")] {
                                                log_trace!($logger, "Pruning and persisting network graph.");
                                                network_graph.remove_stale_channels_and_tracking();
                                        }
                                        #[cfg(not(feature = "std"))] {
                                                log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
                                                log_trace!($logger, "Persisting network graph.");
                                        }

                                        // Graph-persistence failure is logged but non-fatal; we'll retry on
                                        // the next prune interval.
                                        if let Err(e) = $persister.persist_graph(network_graph) {
                                                log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
                                        }

                                        have_pruned = true;
                                }
                                let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
                                last_prune_call = $get_timer(prune_timer);
                        }

                        if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
                                if let Some(ref scorer) = $scorer {
                                        log_trace!($logger, "Persisting scorer");
                                        // Scorer-persistence failure is likewise logged but non-fatal.
                                        if let Err(e) = $persister.persist_scorer(&scorer) {
                                                log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
                                        }
                                }
                                last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
                        }

                        if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
                                log_trace!($logger, "Rebroadcasting monitor's pending claims");
                                $chain_monitor.rebroadcast_pending_claims();
                                last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
                        }
                }

                // After we exit, ensure we persist the ChannelManager one final time - this avoids
                // some races where users quit while channel updates were in-flight, with
                // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
                $persister.persist_manager(&*$channel_manager)?;

                // Persist Scorer on exit
                if let Some(ref scorer) = $scorer {
                        $persister.persist_scorer(&scorer)?;
                }

                // Persist NetworkGraph on exit
                if let Some(network_graph) = $gossip_sync.network_graph() {
                        $persister.persist_graph(network_graph)?;
                }

                Ok(())
        } }
}
427
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
        use core::future::Future;
        use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
        use core::pin::Pin;
        use core::marker::Unpin;
        /// A minimal three-way select: a future which completes as soon as any of its
        /// three inner futures completes, reporting which one finished first (and, for
        /// `c`, its boolean output).
        pub(crate) struct Selector<
                A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
        > {
                pub a: A,
                pub b: B,
                pub c: C,
        }
        /// Which of the [`Selector`]'s futures completed first.
        pub(crate) enum SelectorOutput {
                A, B, C(bool),
        }

        impl<
                A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
        > Future for Selector<A, B, C> {
                type Output = SelectorOutput;
                fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
                        // Poll in fixed a, b, c order; the first future found ready wins and the
                        // remaining futures are never polled again (they're dropped with `self`).
                        match Pin::new(&mut self.a).poll(ctx) {
                                Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
                                Poll::Pending => {},
                        }
                        match Pin::new(&mut self.b).poll(ctx) {
                                Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
                                Poll::Pending => {},
                        }
                        match Pin::new(&mut self.c).poll(ctx) {
                                Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
                                Poll::Pending => {},
                        }
                        Poll::Pending
                }
        }

        // If we want to poll a future without an async context to figure out if it has completed or
        // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
        // but sadly there's a good bit of boilerplate here.
        fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
        // No-op implementation shared by wake, wake_by_ref and drop — the waker carries no state.
        fn dummy_waker_action(_: *const ()) { }

        const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
                dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
        /// Builds a [`Waker`] that does nothing when woken, for polling outside an async context.
        // SAFETY: every vtable function above ignores its data pointer, so constructing the
        // RawWaker with a null pointer upholds the RawWakerVTable contract.
        pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
476 #[cfg(feature = "futures")]
477 use futures_util::{Selector, SelectorOutput, dummy_waker};
478 #[cfg(feature = "futures")]
479 use core::task;
480
481 /// Processes background events in a future.
482 ///
483 /// `sleeper` should return a future which completes in the given amount of time and returns a
484 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
485 /// future which outputs `true`, the loop will exit and this function's future will complete.
486 /// The `sleeper` future is free to return early after it has triggered the exit condition.
487 ///
488 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
489 ///
490 /// Requires the `futures` feature. Note that while this method is available without the `std`
491 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`],
492 /// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly
493 /// manually instead.
494 ///
495 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
496 /// mobile device, where we may need to check for interruption of the application regularly. If you
497 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
498 /// are hundreds or thousands of simultaneous process calls running.
499 ///
500 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
501 /// could setup `process_events_async` like this:
502 /// ```
503 /// # struct MyPersister {}
504 /// # impl lightning::util::persist::KVStorePersister for MyPersister {
505 /// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
506 /// # }
507 /// # struct MyEventHandler {}
508 /// # impl MyEventHandler {
509 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
510 /// # }
511 /// # #[derive(Eq, PartialEq, Clone, Hash)]
512 /// # struct MySocketDescriptor {}
513 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
514 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
515 /// #     fn disconnect_socket(&mut self) {}
516 /// # }
517 /// # use std::sync::{Arc, Mutex};
518 /// # use std::sync::atomic::{AtomicBool, Ordering};
519 /// # use lightning_background_processor::{process_events_async, GossipSync};
520 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
521 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
522 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
523 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
524 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
525 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
526 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
527 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
528 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
529 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
530 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
531 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
532 ///
533 /// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
534 ///     let background_persister = Arc::clone(&my_persister);
535 ///     let background_event_handler = Arc::clone(&my_event_handler);
536 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
537 ///     let background_chan_man = Arc::clone(&my_channel_manager);
538 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
539 ///     let background_peer_man = Arc::clone(&my_peer_manager);
540 ///     let background_logger = Arc::clone(&my_logger);
541 ///     let background_scorer = Arc::clone(&my_scorer);
542 ///
543 ///     // Setup the sleeper.
544 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
545 ///
546 ///     let sleeper = move |d| {
547 ///             let mut receiver = stop_receiver.clone();
548 ///             Box::pin(async move {
549 ///                     tokio::select!{
550 ///                             _ = tokio::time::sleep(d) => false,
551 ///                             _ = receiver.changed() => true,
552 ///                     }
553 ///             })
554 ///     };
555 ///
556 ///     let mobile_interruptable_platform = false;
557 ///
558 ///     let handle = tokio::spawn(async move {
559 ///             process_events_async(
560 ///                     background_persister,
561 ///                     |e| background_event_handler.handle_event(e),
562 ///                     background_chain_mon,
563 ///                     background_chan_man,
564 ///                     background_gossip_sync,
565 ///                     background_peer_man,
566 ///                     background_logger,
567 ///                     Some(background_scorer),
568 ///                     sleeper,
569 ///                     mobile_interruptable_platform,
570 ///                     )
571 ///                     .await
572 ///                     .expect("Failed to process events");
573 ///     });
574 ///
575 ///     // Stop the background processing.
576 ///     stop_sender.send(()).unwrap();
577 ///     handle.await.unwrap();
578 ///     # }
579 ///```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	APM: APeerManager + Send + Sync,
	PM: 'static + Deref<Target = APM> + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
{
	// Set when the user-supplied `sleeper` future resolves to `true`, which is the
	// caller's signal that the processing loop should exit.
	let mut should_break = false;
	// Wrap the caller's event handler so that, before each event is handled, we (a) apply any
	// network-graph update implied by the event and (b) update the scorer and persist it if the
	// event changed it.
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		// Borrow (rather than move) so this closure can be invoked once per event.
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				// Only persist when `update_scorer` reports that the event actually
				// modified the scorer state.
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						// Non-fatal: a failed scorer persist is logged but does not abort processing.
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	// `define_run_body!` expands to the main processing loop shared with the threaded
	// `BackgroundProcessor::start`. The block argument below is the "wait" step: it races a
	// channel-manager persistable-update notification (A), a chain-monitor update (B), and the
	// user-provided sleeper acting as a timer/exit signal (C). The resulting boolean is
	// consumed by the macro's loop logic.
	define_run_body!(persister,
		chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		gossip_sync, peer_manager, logger, scorer, should_break, {
			let fut = Selector {
				a: channel_manager.get_persistable_update_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A => true,
				SelectorOutput::B => false,
				SelectorOutput::C(exit) => {
					should_break = exit;
					false
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			// Poll the sleep future exactly once with a no-op waker: returns `true` iff the
			// sleep has completed, recording whether the caller requested an exit.
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform)
}
676
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		APM: APeerManager + Send + Sync,
		PM: 'static + Deref<Target = APM> + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	{
		// Flag shared with the spawned thread; set to `true` by `stop`/`Drop` to request a
		// clean shutdown.
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			// Decorate the user's event handler with network-graph updates and scorer
			// update/persistence, mirroring the async wrapper in `process_events_async`.
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					// Only persist when the event actually changed the scorer state.
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							// Non-fatal: log the failure and keep processing events.
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			// Shared main loop (same macro as the async variant). Each iteration sleeps at most
			// 100ms waiting on channel-manager/chain-monitor wakeups; the `stop_thread` load is
			// passed as the loop's exit condition.
			define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
				Sleeper::from_two_futures(
					channel_manager.get_persistable_update_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)),
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}

	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	// Signals the background thread to exit (via the shared `stop_thread` flag), then joins it.
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	// Joins the background thread if it hasn't been joined yet, propagating any persistence
	// error it returned. Panics if the thread itself panicked. Subsequent calls (after the
	// handle has been taken) are no-ops returning `Ok(())`.
	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}
835
#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		// Ensure the background thread is stopped and joined on drop. Note this `unwrap`s, so
		// it panics if the thread panicked or returned a persistence error; callers wanting to
		// handle errors should use `stop`/`join` explicitly instead.
		self.stop_and_join_thread().unwrap();
	}
}
842
843 #[cfg(all(feature = "std", test))]
844 mod tests {
845         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
846         use bitcoin::blockdata::locktime::PackedLockTime;
847         use bitcoin::blockdata::transaction::{Transaction, TxOut};
848         use bitcoin::network::constants::Network;
849         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
850         use lightning::chain::{BestBlock, Confirm, chainmonitor};
851         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
852         use lightning::sign::{InMemorySigner, KeysManager};
853         use lightning::chain::transaction::OutPoint;
854         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
855         use lightning::{get_event_msg, get_event};
856         use lightning::ln::PaymentHash;
857         use lightning::ln::channelmanager;
858         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
859         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
860         use lightning::ln::functional_test_utils::*;
861         use lightning::ln::msgs::{ChannelMessageHandler, Init};
862         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
863         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
864         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
865         use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp};
866         use lightning::util::config::UserConfig;
867         use lightning::util::ser::Writeable;
868         use lightning::util::test_utils;
869         use lightning::util::persist::KVStorePersister;
870         use lightning_persister::FilesystemPersister;
871         use std::collections::VecDeque;
872         use std::{fs, env};
873         use std::path::PathBuf;
874         use std::sync::{Arc, Mutex};
875         use std::sync::mpsc::SyncSender;
876         use std::time::Duration;
877         use lightning_rapid_gossip_sync::RapidGossipSync;
878         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
879
880         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
881
	/// Minimal no-op [`SocketDescriptor`] used only to satisfy `PeerManager`'s type parameters
	/// in these tests; no real networking is performed.
	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			// Report that zero bytes were written.
			0
		}

		fn disconnect_socket(&mut self) {}
	}
891
	// Concrete `ChannelManager` used throughout these tests, wired entirely to test utilities
	// (test broadcaster/fee estimator/logger) and a `DefaultRouter` scored by `TestScorer`.
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<Mutex<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;

	// Concrete `ChainMonitor` persisting via `FilesystemPersister` into a per-node temp dir.
	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;

	// Shorthand for the two gossip-sync flavors exercised by the tests.
	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
913
	/// Bundles one test node's `ChannelManager` together with all the collaborators the
	/// background processor needs (monitors, gossip syncs, persister, scorer, ...).
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		// Writes to a per-node temp directory which is removed in this type's `Drop` impl.
		persister: Arc<FilesystemPersister>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<Mutex<TestScorer>>,
	}
927
928         impl Node {
929                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
930                         GossipSync::P2P(self.p2p_gossip_sync.clone())
931                 }
932
933                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
934                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
935                 }
936
937                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
938                         GossipSync::None
939                 }
940         }
941
942         impl Drop for Node {
943                 fn drop(&mut self) {
944                         let data_dir = self.persister.get_data_dir();
945                         match fs::remove_dir_all(data_dir.clone()) {
946                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
947                                 _ => {}
948                         }
949                 }
950         }
951
	/// Test persister wrapping a real `FilesystemPersister`, with hooks to inject failures for
	/// specific persistence keys and to signal when graph persistence is attempted.
	struct Persister {
		// When set, persisting the "network_graph" key fails with this error.
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		// When set, a `()` is sent on this channel each time graph persistence is attempted.
		graph_persistence_notifier: Option<SyncSender<()>>,
		// When set, persisting the "manager" key fails with this error.
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		// When set, persisting the "scorer" key fails with this error.
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		filesystem_persister: FilesystemPersister,
	}
959
960         impl Persister {
961                 fn new(data_dir: String) -> Self {
962                         let filesystem_persister = FilesystemPersister::new(data_dir);
963                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
964                 }
965
966                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
967                         Self { graph_error: Some((error, message)), ..self }
968                 }
969
970                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
971                         Self { graph_persistence_notifier: Some(sender), ..self }
972                 }
973
974                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
975                         Self { manager_error: Some((error, message)), ..self }
976                 }
977
978                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
979                         Self { scorer_error: Some((error, message)), ..self }
980                 }
981         }
982
983         impl KVStorePersister for Persister {
984                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
985                         if key == "manager" {
986                                 if let Some((error, message)) = self.manager_error {
987                                         return Err(std::io::Error::new(error, message))
988                                 }
989                         }
990
991                         if key == "network_graph" {
992                                 if let Some(sender) = &self.graph_persistence_notifier {
993                                         match sender.send(()) {
994                                                 Ok(()) => {},
995                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
996                                         }
997                                 };
998
999                                 if let Some((error, message)) = self.graph_error {
1000                                         return Err(std::io::Error::new(error, message))
1001                                 }
1002                         }
1003
1004                         if key == "scorer" {
1005                                 if let Some((error, message)) = self.scorer_error {
1006                                         return Err(std::io::Error::new(error, message))
1007                                 }
1008                         }
1009
1010                         self.filesystem_persister.persist(key, object)
1011                 }
1012         }
1013
	/// Scorer test double which panics when score-update callbacks don't match the
	/// pre-registered expectations (checked FIFO), or when expectations go unsatisfied at drop.
	struct TestScorer {
		// `None` means no expectations are being enforced and all callbacks are accepted.
		event_expectations: Option<VecDeque<TestResult>>,
	}

	/// The next score-update callback a [`TestScorer`] expects to observe.
	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}
1025
1026         impl TestScorer {
1027                 fn new() -> Self {
1028                         Self { event_expectations: None }
1029                 }
1030
1031                 fn expect(&mut self, expectation: TestResult) {
1032                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1033                 }
1034         }
1035
	impl lightning::util::ser::Writeable for TestScorer {
		// Serialization is irrelevant to these tests: write nothing and report success.
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}

	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		// Route scoring is never exercised by these tests, so any call is a test bug.
		fn channel_penalty_msat(
			&self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
1046
1047         impl ScoreUpdate for TestScorer {
1048                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
1049                         if let Some(expectations) = &mut self.event_expectations {
1050                                 match expectations.pop_front().unwrap() {
1051                                         TestResult::PaymentFailure { path, short_channel_id } => {
1052                                                 assert_eq!(actual_path, &path);
1053                                                 assert_eq!(actual_short_channel_id, short_channel_id);
1054                                         },
1055                                         TestResult::PaymentSuccess { path } => {
1056                                                 panic!("Unexpected successful payment path: {:?}", path)
1057                                         },
1058                                         TestResult::ProbeFailure { path } => {
1059                                                 panic!("Unexpected probe failure: {:?}", path)
1060                                         },
1061                                         TestResult::ProbeSuccess { path } => {
1062                                                 panic!("Unexpected probe success: {:?}", path)
1063                                         }
1064                                 }
1065                         }
1066                 }
1067
1068                 fn payment_path_successful(&mut self, actual_path: &Path) {
1069                         if let Some(expectations) = &mut self.event_expectations {
1070                                 match expectations.pop_front().unwrap() {
1071                                         TestResult::PaymentFailure { path, .. } => {
1072                                                 panic!("Unexpected payment path failure: {:?}", path)
1073                                         },
1074                                         TestResult::PaymentSuccess { path } => {
1075                                                 assert_eq!(actual_path, &path);
1076                                         },
1077                                         TestResult::ProbeFailure { path } => {
1078                                                 panic!("Unexpected probe failure: {:?}", path)
1079                                         },
1080                                         TestResult::ProbeSuccess { path } => {
1081                                                 panic!("Unexpected probe success: {:?}", path)
1082                                         }
1083                                 }
1084                         }
1085                 }
1086
1087                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
1088                         if let Some(expectations) = &mut self.event_expectations {
1089                                 match expectations.pop_front().unwrap() {
1090                                         TestResult::PaymentFailure { path, .. } => {
1091                                                 panic!("Unexpected payment path failure: {:?}", path)
1092                                         },
1093                                         TestResult::PaymentSuccess { path } => {
1094                                                 panic!("Unexpected payment path success: {:?}", path)
1095                                         },
1096                                         TestResult::ProbeFailure { path } => {
1097                                                 assert_eq!(actual_path, &path);
1098                                         },
1099                                         TestResult::ProbeSuccess { path } => {
1100                                                 panic!("Unexpected probe success: {:?}", path)
1101                                         }
1102                                 }
1103                         }
1104                 }
1105                 fn probe_successful(&mut self, actual_path: &Path) {
1106                         if let Some(expectations) = &mut self.event_expectations {
1107                                 match expectations.pop_front().unwrap() {
1108                                         TestResult::PaymentFailure { path, .. } => {
1109                                                 panic!("Unexpected payment path failure: {:?}", path)
1110                                         },
1111                                         TestResult::PaymentSuccess { path } => {
1112                                                 panic!("Unexpected payment path success: {:?}", path)
1113                                         },
1114                                         TestResult::ProbeFailure { path } => {
1115                                                 panic!("Unexpected probe failure: {:?}", path)
1116                                         },
1117                                         TestResult::ProbeSuccess { path } => {
1118                                                 assert_eq!(actual_path, &path);
1119                                         }
1120                                 }
1121                         }
1122                 }
1123         }
1124
1125         impl Drop for TestScorer {
1126                 fn drop(&mut self) {
1127                         if std::thread::panicking() {
1128                                 return;
1129                         }
1130
1131                         if let Some(event_expectations) = &self.event_expectations {
1132                                 if !event_expectations.is_empty() {
1133                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1134                                 }
1135                         }
1136                 }
1137         }
1138
1139         fn get_full_filepath(filepath: String, filename: String) -> String {
1140                 let mut path = PathBuf::from(filepath);
1141                 path.push(filename);
1142                 path.to_str().unwrap().to_string()
1143         }
1144
	/// Creates `num_nodes` fully-stocked test `Node`s, each with its own chain-interface stubs
	/// and persisting under a fresh temp directory derived from `persist_dir`, then connects
	/// every pair of nodes as peers.
	///
	/// Returns the temp persistence directory path together with the constructed nodes.
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(Mutex::new(TestScorer::new()));
			// Deterministic per-node seed, reused for both the router's random bytes and the
			// node's key material below.
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			// Per-node persister directory: "<persist_dir>_persister_<i>".
			let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			// NOTE(review): the channel message handler stub is built for Testnet while
			// everything else above uses Bitcoin mainnet -- presumably irrelevant for these
			// tests, but worth confirming if chain-hash checks ever start mattering here.
			let msg_handler = MessageHandler {
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		// Connect every distinct pair of nodes in both directions; the trailing bool marks
		// which side is treated as the inbound connection.
		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
1192
	// Opens a channel from `$node_a` to `$node_b` with `$channel_value` sats, driving the
	// funding handshake through until both sides have seen `Event::ChannelPending`. Evaluates
	// to the funding transaction. Consumes pending events on `$node_a`, so it must complete
	// before a `BackgroundProcessor` starts handling events.
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}
1207
	// Starts a channel open from `$node_a` to `$node_b` for `$channel_value` sats, exchanging
	// the open_channel/accept_channel messages but stopping before funding generation (which
	// leaves a FundingGenerationReady event pending on `$node_a`).
	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}
1215
	// Destructures a `FundingGenerationReady` event (panicking on any other event), asserts it
	// matches `$channel_value` and the user_channel_id (42) used by `begin_open_channel!`, and
	// evaluates to the temporary channel id plus a funding transaction paying the requested
	// output script.
	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
1232
1233         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1234                 for i in 1..=depth {
1235                         let prev_blockhash = node.best_block.block_hash();
1236                         let height = node.best_block.height() + 1;
1237                         let header = create_dummy_header(prev_blockhash, height);
1238                         let txdata = vec![(0, tx)];
1239                         node.best_block = BestBlock::new(header.block_hash(), height);
1240                         match i {
1241                                 1 => {
1242                                         node.node.transactions_confirmed(&header, &txdata, height);
1243                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1244                                 },
1245                                 x if x == depth => {
1246                                         node.node.best_block_updated(&header, height);
1247                                         node.chain_monitor.best_block_updated(&header, height);
1248                                 },
1249                                 _ => {},
1250                         }
1251                 }
1252         }
	/// Confirms `tx` with `ANTI_REORG_DELAY` blocks, enough for the confirmation to be treated
	/// as irrevocable by the node.
	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
1256
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Busy-waits until the on-disk bytes at $filepath match the current serialization of
		// $node, i.e. the background processor has persisted the latest state.
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		// Spin until the manager's persistence notification is no longer pending (presumably so
		// the next persist we observe is the one triggered by the force-close below -- confirm
		// against `get_persistence_condvar_value`'s semantics if this ever flakes).
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_persistence_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		// Avoid double-panicking if an assertion above already failed.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1328
1329         #[test]
1330         fn test_timer_tick_called() {
1331                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1332                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1333                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1334                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1335                 let data_dir = nodes[0].persister.get_data_dir();
1336                 let persister = Arc::new(Persister::new(data_dir));
1337                 let event_handler = |_: _| {};
1338                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1339                 loop {
1340                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1341                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1342                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1343                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1344                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1345                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1346                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1347                                 break
1348                         }
1349                 }
1350
1351                 if !std::thread::panicking() {
1352                         bg_processor.stop().unwrap();
1353                 }
1354         }
1355
	#[test]
	fn test_channel_manager_persist_error() {
		// Test that if we encounter an error during manager persistence, the error is surfaced
		// when joining the background processor thread (the test asserts an `Err` from `join()`,
		// not a panic).
		let (_, nodes) = create_nodes(2, "test_persist_error");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].persister.get_data_dir();
		// Inject a manager-persistence failure so every persist attempt errors out.
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		match bg_processor.join() {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				// The injected error kind and message should round-trip unchanged.
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
1374
1375         #[tokio::test]
1376         #[cfg(feature = "futures")]
1377         async fn test_channel_manager_persist_error_async() {
1378                 // Test that if we encounter an error during manager persistence, the thread panics.
1379                 let (_, nodes) = create_nodes(2, "test_persist_error_sync");
1380                 open_channel!(nodes[0], nodes[1], 100000);
1381
1382                 let data_dir = nodes[0].persister.get_data_dir();
1383                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1384
1385                 let bp_future = super::process_events_async(
1386                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1387                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1388                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1389                                 Box::pin(async move {
1390                                         tokio::time::sleep(dur).await;
1391                                         false // Never exit
1392                                 })
1393                         }, false,
1394                 );
1395                 match bp_future.await {
1396                         Ok(_) => panic!("Expected error persisting manager"),
1397                         Err(e) => {
1398                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1399                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1400                         },
1401                 }
1402         }
1403
1404         #[test]
1405         fn test_network_graph_persist_error() {
1406                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1407                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1408                 let data_dir = nodes[0].persister.get_data_dir();
1409                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1410                 let event_handler = |_: _| {};
1411                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1412
1413                 match bg_processor.stop() {
1414                         Ok(_) => panic!("Expected error persisting network graph"),
1415                         Err(e) => {
1416                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1417                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1418                         },
1419                 }
1420         }
1421
1422         #[test]
1423         fn test_scorer_persist_error() {
1424                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1425                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1426                 let data_dir = nodes[0].persister.get_data_dir();
1427                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1428                 let event_handler = |_: _| {};
1429                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1430
1431                 match bg_processor.stop() {
1432                         Ok(_) => panic!("Expected error persisting scorer"),
1433                         Err(e) => {
1434                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1435                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1436                         },
1437                 }
1438         }
1439
	#[test]
	fn test_background_event_handling() {
		// Tests that events generated while the BackgroundProcessor runs are routed to the
		// registered event handler: FundingGenerationReady and ChannelPending while a channel is
		// opened, then SpendableOutputs after the channel is force-closed.
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].persister.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));

		// Set up a background event handler for FundingGenerationReady events.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");

		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

		// Stop the first processor before swapping in a new event handler below.
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		// Bury the commitment tx deep enough (BREAKDOWN_TIMEOUT blocks) for its output to
		// become spendable.
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1513
1514         #[test]
1515         fn test_scorer_persistence() {
1516                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1517                 let data_dir = nodes[0].persister.get_data_dir();
1518                 let persister = Arc::new(Persister::new(data_dir));
1519                 let event_handler = |_: _| {};
1520                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1521
1522                 loop {
1523                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1524                         let expected_log = "Persisting scorer".to_string();
1525                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1526                                 break
1527                         }
1528                 }
1529
1530                 if !std::thread::panicking() {
1531                         bg_processor.stop().unwrap();
1532                 }
1533         }
1534
	// Shared body for the sync and async variants of the "don't prune the network graph until
	// rapid gossip sync completes" test. `$nodes` are the test nodes, `$receive` is an
	// expression blocking until the persister signals that graph persistence (and thus pruning)
	// ran, and `$sleep` yields between polls of the processor's log output.
	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			// Seed the graph with a stale partial announcement that pruning would remove.
			let features = ChannelFeatures::empty();
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
					.unwrap_or(&0) > 1
				{
					// Wait until the loop has gone around at least twice.
					break
				}
			}

			// Raw rapid-gossip-sync snapshot bytes; feeding them in below adds two channels and
			// marks the initial RGS sync as complete.
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			// NOTE(review): 1642291930 is the snapshot's reference timestamp -- presumably far
			// enough in the past that all channels become immediately prunable; confirm against
			// the RGS test vector if this data is ever regenerated.
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

			// this should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			$receive.expect("Network graph not pruned within deadline");

			// all channels should now be pruned
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
1583
1584         #[test]
1585         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1586                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1587
1588                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1589                 let data_dir = nodes[0].persister.get_data_dir();
1590                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1591
1592                 let event_handler = |_: _| {};
1593                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1594
1595                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1596                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1597                         std::thread::sleep(Duration::from_millis(1)));
1598
1599                 background_processor.stop().unwrap();
1600         }
1601
1602         #[tokio::test]
1603         #[cfg(feature = "futures")]
1604         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1605                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1606
1607                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
1608                 let data_dir = nodes[0].persister.get_data_dir();
1609                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1610
1611                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1612                 let bp_future = super::process_events_async(
1613                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1614                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1615                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1616                                 let mut exit_receiver = exit_receiver.clone();
1617                                 Box::pin(async move {
1618                                         tokio::select! {
1619                                                 _ = tokio::time::sleep(dur) => false,
1620                                                 _ = exit_receiver.changed() => true,
1621                                         }
1622                                 })
1623                         }, false,
1624                 );
1625
1626                 let t1 = tokio::spawn(bp_future);
1627                 let t2 = tokio::spawn(async move {
1628                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1629                                 let mut i = 0;
1630                                 loop {
1631                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1632                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1633                                         assert!(i < 5);
1634                                         i += 1;
1635                                 }
1636                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1637                         exit_sender.send(()).unwrap();
1638                 });
1639                 let (r1, r2) = tokio::join!(t1, t2);
1640                 r1.unwrap().unwrap();
1641                 r2.unwrap()
1642         }
1643
	// Drives the scorer-update path of an already-running background processor: for each
	// payment/probe event type, arm the `TestScorer` with the single scorer call we expect,
	// push the event into the node, then wait (via `$receive`) for the background processor's
	// event handler to deliver the event back to the test before moving on.
	//
	// `$nodes` is the node set built by `create_nodes`; `$receive` is an expression yielding
	// the next handled event (or an error on timeout).
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

			// A single-hop path over the channel we expect the scorer to be told about.
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			}], blinded_tail: None };

			// A retryable (non-permanent) on-path failure is expected to be scored as a payment
			// failure at the failing channel.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful payment is expected to be scored as a success over the full path.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A successful probe is expected to be scored as a probe success.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful  { .. } => {},
				_ => panic!("Unexpected event"),
			}

			// A failed probe is expected to be scored as a probe failure at the failing channel.
			$nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path,
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}
1735
1736         #[test]
1737         fn test_payment_path_scoring() {
1738                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1739                 let event_handler = move |event: Event| match event {
1740                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1741                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1742                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1743                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1744                         _ => panic!("Unexpected event: {:?}", event),
1745                 };
1746
1747                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1748                 let data_dir = nodes[0].persister.get_data_dir();
1749                 let persister = Arc::new(Persister::new(data_dir));
1750                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1751
1752                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1753
1754                 if !std::thread::panicking() {
1755                         bg_processor.stop().unwrap();
1756                 }
1757
1758                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1759                 let expected_log = "Persisting scorer after update".to_string();
1760                 assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1761         }
1762
1763         #[tokio::test]
1764         #[cfg(feature = "futures")]
1765         async fn test_payment_path_scoring_async() {
1766                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1767                 let event_handler = move |event: Event| {
1768                         let sender_ref = sender.clone();
1769                         async move {
1770                                 match event {
1771                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1772                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1773                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1774                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1775                                         _ => panic!("Unexpected event: {:?}", event),
1776                                 }
1777                         }
1778                 };
1779
1780                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1781                 let data_dir = nodes[0].persister.get_data_dir();
1782                 let persister = Arc::new(Persister::new(data_dir));
1783
1784                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1785
1786                 let bp_future = super::process_events_async(
1787                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1788                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1789                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1790                                 let mut exit_receiver = exit_receiver.clone();
1791                                 Box::pin(async move {
1792                                         tokio::select! {
1793                                                 _ = tokio::time::sleep(dur) => false,
1794                                                 _ = exit_receiver.changed() => true,
1795                                         }
1796                                 })
1797                         }, false,
1798                 );
1799                 let t1 = tokio::spawn(bp_future);
1800                 let t2 = tokio::spawn(async move {
1801                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1802                         exit_sender.send(()).unwrap();
1803
1804                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1805                         let expected_log = "Persisting scorer after update".to_string();
1806                         assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1807                 });
1808
1809                 let (r1, r2) = tokio::join!(t1, t2);
1810                 r1.unwrap().unwrap();
1811                 r2.unwrap()
1812         }
1813 }