1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::peer_handler::APeerManager;
34 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
35 use lightning::routing::utxo::UtxoLookup;
36 use lightning::routing::router::Router;
37 use lightning::routing::scoring::{Score, WriteableScore};
38 use lightning::util::logger::Logger;
39 use lightning::util::persist::Persister;
40 #[cfg(feature = "std")]
41 use lightning::util::wakers::Sleeper;
42 use lightning_rapid_gossip_sync::RapidGossipSync;
43
44 use core::ops::Deref;
45 use core::time::Duration;
46
47 #[cfg(feature = "std")]
48 use std::sync::Arc;
49 #[cfg(feature = "std")]
50 use core::sync::atomic::{AtomicBool, Ordering};
51 #[cfg(feature = "std")]
52 use std::thread::{self, JoinHandle};
53 #[cfg(feature = "std")]
54 use std::time::Instant;
55
56 #[cfg(not(feature = "std"))]
57 use alloc::vec::Vec;
58
59 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
60 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
61 /// responsibilities are:
62 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
63 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
64 ///   writing it to disk/backups by invoking the callback given to it at startup.
65 ///   [`ChannelManager`] persistence should be done in the background.
66 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
67 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
68 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
69 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
70 ///
71 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
72 /// upon as doing so may result in high latency.
73 ///
74 /// # Note
75 ///
76 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
77 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
78 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
79 /// unilateral chain closure fees are at risk.
80 ///
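/// # Example
///
/// A rough sketch of starting and stopping a `BackgroundProcessor` (assuming the required
/// components have already been constructed; the names below are placeholders):
///
/// ```ignore
/// let background_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager,
///     GossipSync::p2p(gossip_sync), peer_manager, logger, Some(scorer),
/// );
/// // ... run the node ...
/// background_processor.stop().expect("Failed to persist on shutdown");
/// ```
///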
81 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
82 /// [`Event`]: lightning::events::Event
83 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
84 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
85 #[cfg(feature = "std")]
86 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
87 pub struct BackgroundProcessor {
88         stop_thread: Arc<AtomicBool>,
89         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
90 }
91
92 #[cfg(not(test))]
93 const FRESHNESS_TIMER: u64 = 60;
94 #[cfg(test)]
95 const FRESHNESS_TIMER: u64 = 1;
96
97 #[cfg(all(not(test), not(debug_assertions)))]
98 const PING_TIMER: u64 = 10;
99 /// Signature operations take a lot longer without compiler optimisations.
100 /// Increasing the ping timer allows for this, but slower devices will be disconnected if the
101 /// timeout is reached.
102 #[cfg(all(not(test), debug_assertions))]
103 const PING_TIMER: u64 = 30;
104 #[cfg(test)]
105 const PING_TIMER: u64 = 1;
106
107 /// Prune the network graph of stale entries hourly.
108 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
109
110 #[cfg(not(test))]
111 const SCORER_PERSIST_TIMER: u64 = 60 * 60;
112 #[cfg(test)]
113 const SCORER_PERSIST_TIMER: u64 = 1;
114
115 #[cfg(not(test))]
116 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
117 #[cfg(test)]
118 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
119
120 #[cfg(not(test))]
121 const REBROADCAST_TIMER: u64 = 30;
122 #[cfg(test)]
123 const REBROADCAST_TIMER: u64 = 1;
124
125 #[cfg(feature = "futures")]
126 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
127 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
128 #[cfg(feature = "futures")]
129 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
130         min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
131
132 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
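///
/// For example (a sketch; `p2p_sync` here stands in for an `Arc`-wrapped [`P2PGossipSync`]):
///
/// ```ignore
/// let gossip_sync = GossipSync::p2p(p2p_sync);
/// ```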
133 pub enum GossipSync<
134         P: Deref<Target = P2PGossipSync<G, U, L>>,
135         R: Deref<Target = RapidGossipSync<G, L>>,
136         G: Deref<Target = NetworkGraph<L>>,
137         U: Deref,
138         L: Deref,
139 >
140 where U::Target: UtxoLookup, L::Target: Logger {
141         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
142         P2P(P),
143         /// Rapid gossip sync from a trusted server.
144         Rapid(R),
145         /// No gossip sync.
146         None,
147 }
148
149 impl<
150         P: Deref<Target = P2PGossipSync<G, U, L>>,
151         R: Deref<Target = RapidGossipSync<G, L>>,
152         G: Deref<Target = NetworkGraph<L>>,
153         U: Deref,
154         L: Deref,
155 > GossipSync<P, R, G, U, L>
156 where U::Target: UtxoLookup, L::Target: Logger {
157         fn network_graph(&self) -> Option<&G> {
158                 match self {
159                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
161                         GossipSync::None => None,
162                 }
163         }
164
165         fn prunable_network_graph(&self) -> Option<&G> {
166                 match self {
167                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168                         GossipSync::Rapid(gossip_sync) => {
169                                 if gossip_sync.is_initial_sync_complete() {
170                                         Some(gossip_sync.network_graph())
171                                 } else {
172                                         None
173                                 }
174                         },
175                         GossipSync::None => None,
176                 }
177         }
178 }
179
180 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
181 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
182         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
183 where
184         U::Target: UtxoLookup,
185         L::Target: Logger,
186 {
187         /// Initializes a new [`GossipSync::P2P`] variant.
188         pub fn p2p(gossip_sync: P) -> Self {
189                 GossipSync::P2P(gossip_sync)
190         }
191 }
192
193 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
194 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
195         GossipSync<
196                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
197                 R,
198                 G,
199                 &'a (dyn UtxoLookup + Send + Sync),
200                 L,
201         >
202 where
203         L::Target: Logger,
204 {
205         /// Initializes a new [`GossipSync::Rapid`] variant.
206         pub fn rapid(gossip_sync: R) -> Self {
207                 GossipSync::Rapid(gossip_sync)
208         }
209 }
210
211 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
212 impl<'a, L: Deref>
213         GossipSync<
214                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
215                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
216                 &'a NetworkGraph<L>,
217                 &'a (dyn UtxoLookup + Send + Sync),
218                 L,
219         >
220 where
221         L::Target: Logger,
222 {
223         /// Initializes a new [`GossipSync::None`] variant.
224         pub fn none() -> Self {
225                 GossipSync::None
226         }
227 }
228
229 fn handle_network_graph_update<L: Deref>(
230         network_graph: &NetworkGraph<L>, event: &Event
231 ) where L::Target: Logger {
232         if let Event::PaymentPathFailed {
233                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
234         {
235                 network_graph.handle_network_update(upd);
236         }
237 }
238
239 /// Updates scorer based on event and returns whether an update occurred so we can decide whether
240 /// to persist.
241 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
242         scorer: &'a S, event: &Event
243 ) -> bool {
244         let mut score = scorer.lock();
245         match event {
246                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
247                         score.payment_path_failed(path, *scid);
248                 },
249                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
250                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
251                         // because the payment made it all the way to the destination with sufficient liquidity.
252                         score.probe_successful(path);
253                 },
254                 Event::PaymentPathSuccessful { path, .. } => {
255                         score.payment_path_successful(path);
256                 },
257                 Event::ProbeSuccessful { path, .. } => {
258                         score.probe_successful(path);
259                 },
260                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
261                         score.probe_failed(path, *scid);
262                 },
263                 _ => return false,
264         }
265         true
266 }
267
268 macro_rules! define_run_body {
269         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
270          $channel_manager: ident, $process_channel_manager_events: expr,
271          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
272          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
273          $check_slow_await: expr)
274         => { {
275                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
276                 $channel_manager.timer_tick_occurred();
277                 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
278                 $chain_monitor.rebroadcast_pending_claims();
279
280                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
281                 let mut last_ping_call = $get_timer(PING_TIMER);
282                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
283                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
284                 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
285                 let mut have_pruned = false;
286
287                 loop {
288                         $process_channel_manager_events;
289                         $process_chain_monitor_events;
290
291                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
292                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
293                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
294                         // without running the normal event processing above and handing events to users.
295                         //
296                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
297                         // processing a message effectively at any point during this loop. In order to
298                         // minimize the time between such processing completing and persisting the updated
299                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
300                         // generally, and as a fallback place such blocking only immediately before
301                         // persistence.
302                         $peer_manager.as_ref().process_events();
303
304                         // Exit the loop if the background processor was requested to stop.
305                         if $loop_exit_check {
306                                 log_trace!($logger, "Terminating background processor.");
307                                 break;
308                         }
309
310                         // We wait up to 100ms, but track how long it takes to detect being put to sleep;
311                         // see `await_start`'s use below.
312                         let mut await_start = None;
313                         if $check_slow_await { await_start = Some($get_timer(1)); }
314                         let updates_available = $await;
315                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
316
317                         // Exit the loop if the background processor was requested to stop.
318                         if $loop_exit_check {
319                                 log_trace!($logger, "Terminating background processor.");
320                                 break;
321                         }
322
323                         if updates_available {
324                                 log_trace!($logger, "Persisting ChannelManager...");
325                                 $persister.persist_manager(&*$channel_manager)?;
326                                 log_trace!($logger, "Done persisting ChannelManager.");
327                         }
328                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
329                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
330                                 $channel_manager.timer_tick_occurred();
331                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
332                         }
333                         if await_slow {
334                                 // On various platforms, we may be starved of CPU cycles for several reasons.
335                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
336                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
337                                 // may not get any cycles.
338                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
339                                 // full second, at which point we assume sockets may have been killed (they
340                                 // appear to be at least on some platforms, even if it has only been a second).
341                                 // Note that we have to take care to not get here just because user event
342                                 // processing was slow at the top of the loop. For example, the sample client
343                                 // may call Bitcoin Core RPCs during event handling, which very often takes
344                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
345                                 // peers.
346                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
347                                 $peer_manager.as_ref().disconnect_all_peers();
348                                 last_ping_call = $get_timer(PING_TIMER);
349                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
350                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
351                                 $peer_manager.as_ref().timer_tick_occurred();
352                                 last_ping_call = $get_timer(PING_TIMER);
353                         }
354
355                         // Note that we want to run a graph prune shortly after startup before falling back
356                         // to our usual hourly cadence; this avoids short-lived clients never pruning their
357                         // network graph. We run the first prune 60 seconds after startup. For RGS, since
358                         // 60 seconds is likely too long, we instead prune as soon as the initial sync
359                         // completes.
360                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
361                         let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
362                         let should_prune = match $gossip_sync {
363                                 GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
364                                 _ => prune_timer_elapsed,
365                         };
366                         if should_prune {
367                                 // The network graph must not be pruned while rapid sync completion is pending
368                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
369                                         #[cfg(feature = "std")] {
370                                                 log_trace!($logger, "Pruning and persisting network graph.");
371                                                 network_graph.remove_stale_channels_and_tracking();
372                                         }
373                                         #[cfg(not(feature = "std"))] {
374                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
375                                                 log_trace!($logger, "Persisting network graph.");
376                                         }
377
378                                         if let Err(e) = $persister.persist_graph(network_graph) {
379                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
380                                         }
381
382                                         have_pruned = true;
383                                 }
384                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
385                                 last_prune_call = $get_timer(prune_timer);
386                         }
387
388                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
389                                 if let Some(ref scorer) = $scorer {
390                                         log_trace!($logger, "Persisting scorer");
391                                         if let Err(e) = $persister.persist_scorer(&scorer) {
392                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
393                                         }
394                                 }
395                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
396                         }
397
398                         if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
399                                 log_trace!($logger, "Rebroadcasting monitor's pending claims");
400                                 $chain_monitor.rebroadcast_pending_claims();
401                                 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
402                         }
403                 }
404
405                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
406                 // some races where users quit while channel updates were in-flight, with
407                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
408                 $persister.persist_manager(&*$channel_manager)?;
409
410                 // Persist Scorer on exit
411                 if let Some(ref scorer) = $scorer {
412                         $persister.persist_scorer(&scorer)?;
413                 }
414
415                 // Persist NetworkGraph on exit
416                 if let Some(network_graph) = $gossip_sync.network_graph() {
417                         $persister.persist_graph(network_graph)?;
418                 }
419
420                 Ok(())
421         } }
422 }
423
424 #[cfg(feature = "futures")]
425 pub(crate) mod futures_util {
426         use core::future::Future;
427         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
428         use core::pin::Pin;
429         use core::marker::Unpin;
430         pub(crate) struct Selector<
431                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
432         > {
433                 pub a: A,
434                 pub b: B,
435                 pub c: C,
436         }
437         pub(crate) enum SelectorOutput {
438                 A, B, C(bool),
439         }
440
441         impl<
442                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
443         > Future for Selector<A, B, C> {
444                 type Output = SelectorOutput;
445                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
446                         match Pin::new(&mut self.a).poll(ctx) {
447                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
448                                 Poll::Pending => {},
449                         }
450                         match Pin::new(&mut self.b).poll(ctx) {
451                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
452                                 Poll::Pending => {},
453                         }
454                         match Pin::new(&mut self.c).poll(ctx) {
455                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
456                                 Poll::Pending => {},
457                         }
458                         Poll::Pending
459                 }
460         }
461
462         // If we want to poll a future outside of an async context (i.e. check whether it has
463         // completed without awaiting), we need a Waker, which needs a vtable. We fill it with
464         // dummy values, but sadly there's a good bit of boilerplate here.
465         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
466         fn dummy_waker_action(_: *const ()) { }
467
468         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
469                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
470         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
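        // For example (a sketch; `fut` stands in for some `Unpin` future), the dummy waker lets us
        // poll once without an async context, much as `process_events_async` below does for its
        // sleep future:
        //
        // let waker = dummy_waker();
        // let mut ctx = core::task::Context::from_waker(&waker);
        // match core::pin::Pin::new(&mut fut).poll(&mut ctx) {
        //         core::task::Poll::Ready(res) => { /* completed with `res` */ },
        //         core::task::Poll::Pending => { /* still pending */ },
        // }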
471 }
472 #[cfg(feature = "futures")]
473 use futures_util::{Selector, SelectorOutput, dummy_waker};
474 #[cfg(feature = "futures")]
475 use core::task;
476
477 /// Processes background events in a future.
478 ///
479 /// `sleeper` should return a future which completes in the given amount of time and returns a
480 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
481 /// future which outputs `true`, the loop will exit and this function's future will complete.
482 /// The `sleeper` future is free to return early after it has triggered the exit condition.
483 ///
484 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
485 ///
486 /// Requires the `futures` feature. Note that while this method is available without the `std`
487 /// feature, using it without `std` will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
488 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
489 /// regularly yourself.
490 ///
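/// For example, in a `no_std` build you might periodically call the following (a sketch, where
/// `network_graph` is your [`NetworkGraph`] and `now_secs` is the current UNIX timestamp in
/// seconds obtained from your platform's clock):
///
/// ```ignore
/// network_graph.remove_stale_channels_and_tracking_with_time(now_secs);
/// ```
///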
491 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
492 /// mobile device, where we may need to check for interruption of the application regularly. If you
493 /// are unsure, you should set the flag, as its performance impact is minimal unless there
494 /// are hundreds or thousands of simultaneous process calls running.
495 ///
496 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
497 /// could set up `process_events_async` like this:
498 /// ```
499 /// # struct MyPersister {}
500 /// # impl lightning::util::persist::KVStorePersister for MyPersister {
501 /// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
502 /// # }
503 /// # struct MyEventHandler {}
504 /// # impl MyEventHandler {
505 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
506 /// # }
507 /// # #[derive(Eq, PartialEq, Clone, Hash)]
508 /// # struct MySocketDescriptor {}
509 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
510 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
511 /// #     fn disconnect_socket(&mut self) {}
512 /// # }
513 /// # use std::sync::{Arc, Mutex};
514 /// # use std::sync::atomic::{AtomicBool, Ordering};
515 /// # use lightning_background_processor::{process_events_async, GossipSync};
516 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
517 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
518 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
519 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
520 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
521 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
522 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
523 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
524 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
525 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
526 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
527 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
528 ///
529 /// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
530 ///     let background_persister = Arc::clone(&my_persister);
531 ///     let background_event_handler = Arc::clone(&my_event_handler);
532 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
533 ///     let background_chan_man = Arc::clone(&my_channel_manager);
534 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
535 ///     let background_peer_man = Arc::clone(&my_peer_manager);
536 ///     let background_logger = Arc::clone(&my_logger);
537 ///     let background_scorer = Arc::clone(&my_scorer);
538 ///
539 ///     // Set up the sleeper.
540 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
541 ///
542 ///     let sleeper = move |d| {
543 ///             let mut receiver = stop_receiver.clone();
544 ///             Box::pin(async move {
545 ///                     tokio::select!{
546 ///                             _ = tokio::time::sleep(d) => false,
547 ///                             _ = receiver.changed() => true,
548 ///                     }
549 ///             })
550 ///     };
551 ///
552 ///     let mobile_interruptable_platform = false;
553 ///
554 ///     let handle = tokio::spawn(async move {
555 ///             process_events_async(
556 ///                     background_persister,
557 ///                     |e| background_event_handler.handle_event(e),
558 ///                     background_chain_mon,
559 ///                     background_chan_man,
560 ///                     background_gossip_sync,
561 ///                     background_peer_man,
562 ///                     background_logger,
563 ///                     Some(background_scorer),
564 ///                     sleeper,
565 ///                     mobile_interruptable_platform,
566 ///                     )
567 ///                     .await
568 ///                     .expect("Failed to process events");
569 ///     });
570 ///
571 ///     // Stop the background processing.
572 ///     stop_sender.send(()).unwrap();
573 ///     handle.await.unwrap();
574 ///     # }
575 ///```
576 #[cfg(feature = "futures")]
577 pub async fn process_events_async<
578         'a,
579         UL: 'static + Deref + Send + Sync,
580         CF: 'static + Deref + Send + Sync,
581         CW: 'static + Deref + Send + Sync,
582         T: 'static + Deref + Send + Sync,
583         ES: 'static + Deref + Send + Sync,
584         NS: 'static + Deref + Send + Sync,
585         SP: 'static + Deref + Send + Sync,
586         F: 'static + Deref + Send + Sync,
587         R: 'static + Deref + Send + Sync,
588         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
589         L: 'static + Deref + Send + Sync,
590         P: 'static + Deref + Send + Sync,
591         EventHandlerFuture: core::future::Future<Output = ()>,
592         EventHandler: Fn(Event) -> EventHandlerFuture,
593         PS: 'static + Deref + Send,
594         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
595         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
596         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
597         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
598         APM: APeerManager + Send + Sync,
599         PM: 'static + Deref<Target = APM> + Send + Sync,
600         S: 'static + Deref<Target = SC> + Send + Sync,
601         SC: for<'b> WriteableScore<'b>,
602         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
603         Sleeper: Fn(Duration) -> SleepFuture
604 >(
605         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
606         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
607         sleeper: Sleeper, mobile_interruptable_platform: bool,
608 ) -> Result<(), lightning::io::Error>
609 where
610         UL::Target: 'static + UtxoLookup,
611         CF::Target: 'static + chain::Filter,
612         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
613         T::Target: 'static + BroadcasterInterface,
614         ES::Target: 'static + EntropySource,
615         NS::Target: 'static + NodeSigner,
616         SP::Target: 'static + SignerProvider,
617         F::Target: 'static + FeeEstimator,
618         R::Target: 'static + Router,
619         L::Target: 'static + Logger,
620         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
621         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
622 {
623         let mut should_break = false;
624         let async_event_handler = |event| {
625                 let network_graph = gossip_sync.network_graph();
626                 let event_handler = &event_handler;
627                 let scorer = &scorer;
628                 let logger = &logger;
629                 let persister = &persister;
630                 async move {
631                         if let Some(network_graph) = network_graph {
632                                 handle_network_graph_update(network_graph, &event)
633                         }
634                         if let Some(ref scorer) = scorer {
635                                 if update_scorer(scorer, &event) {
636                                         log_trace!(logger, "Persisting scorer after update");
637                                         if let Err(e) = persister.persist_scorer(&scorer) {
638                                                 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
639                                         }
640                                 }
641                         }
642                         event_handler(event).await;
643                 }
644         };
645         define_run_body!(persister,
646                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
647                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
648                 gossip_sync, peer_manager, logger, scorer, should_break, {
649                         let fut = Selector {
650                                 a: channel_manager.get_persistable_update_future(),
651                                 b: chain_monitor.get_update_future(),
652                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
653                         };
654                         match fut.await {
655                                 SelectorOutput::A => true,
656                                 SelectorOutput::B => false,
657                                 SelectorOutput::C(exit) => {
658                                         should_break = exit;
659                                         false
660                                 }
661                         }
662                 }, |t| sleeper(Duration::from_secs(t)),
663                 |fut: &mut SleepFuture, _| {
664                         let mut waker = dummy_waker();
665                         let mut ctx = task::Context::from_waker(&mut waker);
666                         match core::pin::Pin::new(fut).poll(&mut ctx) {
667                                 task::Poll::Ready(exit) => { should_break = exit; true },
668                                 task::Poll::Pending => false,
669                         }
670                 }, mobile_interruptable_platform)
671 }
672
673 #[cfg(feature = "std")]
674 impl BackgroundProcessor {
675         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
676         /// documentation].
677         ///
678         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
679         /// [`Persister::persist_manager`] returns an error. In case of an error, the error can be retrieved by calling
680         /// either [`join`] or [`stop`].
681         ///
682         /// # Data Persistence
683         ///
684         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
685         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
686         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
687         /// provided implementation.
688         ///
689         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
690         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
691         /// See the `lightning-persister` crate for LDK's provided implementation.
692         ///
693         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
694         /// error or call [`join`] and handle any error that may arise. For the latter case,
695         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
696         ///
697         /// # Event Handling
698         ///
699         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
700         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
701         /// functionality implemented by other handlers.
702         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
703         ///
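        /// For example (a sketch; the match arms are placeholders for your own logic), a minimal
        /// `event_handler` closure might look like:
        ///
        /// ```ignore
        /// let event_handler = |event: Event| match event {
        ///     Event::PaymentClaimable { .. } => { /* claim the payment */ },
        ///     _ => { /* handle or log other events */ },
        /// };
        /// ```
        ///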
704         /// # Rapid Gossip Sync
705         ///
706         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
707         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
708         /// until the [`RapidGossipSync`] instance completes its first sync.
709         ///
710         /// [top-level documentation]: BackgroundProcessor
711         /// [`join`]: Self::join
712         /// [`stop`]: Self::stop
713         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
714         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
715         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
716         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
717         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
718         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
719         pub fn start<
720                 'a,
721                 UL: 'static + Deref + Send + Sync,
722                 CF: 'static + Deref + Send + Sync,
723                 CW: 'static + Deref + Send + Sync,
724                 T: 'static + Deref + Send + Sync,
725                 ES: 'static + Deref + Send + Sync,
726                 NS: 'static + Deref + Send + Sync,
727                 SP: 'static + Deref + Send + Sync,
728                 F: 'static + Deref + Send + Sync,
729                 R: 'static + Deref + Send + Sync,
730                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
731                 L: 'static + Deref + Send + Sync,
732                 P: 'static + Deref + Send + Sync,
733                 EH: 'static + EventHandler + Send,
734                 PS: 'static + Deref + Send,
735                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
736                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
737                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
738                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
739                 APM: APeerManager + Send + Sync,
740                 PM: 'static + Deref<Target = APM> + Send + Sync,
741                 S: 'static + Deref<Target = SC> + Send + Sync,
742                 SC: for <'b> WriteableScore<'b>,
743         >(
744                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
745                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
746         ) -> Self
747         where
748                 UL::Target: 'static + UtxoLookup,
749                 CF::Target: 'static + chain::Filter,
750                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
751                 T::Target: 'static + BroadcasterInterface,
752                 ES::Target: 'static + EntropySource,
753                 NS::Target: 'static + NodeSigner,
754                 SP::Target: 'static + SignerProvider,
755                 F::Target: 'static + FeeEstimator,
756                 R::Target: 'static + Router,
757                 L::Target: 'static + Logger,
758                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
759                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
760         {
761                 let stop_thread = Arc::new(AtomicBool::new(false));
762                 let stop_thread_clone = stop_thread.clone();
763                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
764                         let event_handler = |event| {
765                                 let network_graph = gossip_sync.network_graph();
766                                 if let Some(network_graph) = network_graph {
767                                         handle_network_graph_update(network_graph, &event)
768                                 }
769                                 if let Some(ref scorer) = scorer {
770                                         if update_scorer(scorer, &event) {
771                                                 log_trace!(logger, "Persisting scorer after update");
772                                                 if let Err(e) = persister.persist_scorer(&scorer) {
773                                                         log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
774                                                 }
775                                         }
776                                 }
777                                 event_handler.handle_event(event);
778                         };
779                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
780                                 channel_manager, channel_manager.process_pending_events(&event_handler),
781                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
782                                 Sleeper::from_two_futures(
783                                         channel_manager.get_persistable_update_future(),
784                                         chain_monitor.get_update_future()
785                                 ).wait_timeout(Duration::from_millis(100)),
786                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
787                 });
788                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
789         }
790
791         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
792         /// [`ChannelManager`].
793         ///
794         /// # Panics
795         ///
796         /// This function panics if the background thread has panicked such as while persisting or
797         /// handling events.
798         ///
799         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
800         pub fn join(mut self) -> Result<(), std::io::Error> {
801                 assert!(self.thread_handle.is_some());
802                 self.join_thread()
803         }
804
805         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
806         /// [`ChannelManager`].
807         ///
808         /// # Panics
809         ///
810         /// This function panics if the background thread has panicked such as while persisting or
811         /// handling events.
812         ///
813         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
814         pub fn stop(mut self) -> Result<(), std::io::Error> {
815                 assert!(self.thread_handle.is_some());
816                 self.stop_and_join_thread()
817         }
818
819         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
820                 self.stop_thread.store(true, Ordering::Release);
821                 self.join_thread()
822         }
823
824         fn join_thread(&mut self) -> Result<(), std::io::Error> {
825                 match self.thread_handle.take() {
826                         Some(handle) => handle.join().unwrap(),
827                         None => Ok(()),
828                 }
829         }
830 }
831
832 #[cfg(feature = "std")]
833 impl Drop for BackgroundProcessor {
834         fn drop(&mut self) {
835                 self.stop_and_join_thread().unwrap();
836         }
837 }
838
839 #[cfg(all(feature = "std", test))]
840 mod tests {
841         use bitcoin::blockdata::constants::{genesis_block, ChainHash};
842         use bitcoin::blockdata::locktime::PackedLockTime;
843         use bitcoin::blockdata::transaction::{Transaction, TxOut};
844         use bitcoin::network::constants::Network;
845         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
846         use lightning::chain::{BestBlock, Confirm, chainmonitor};
847         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
848         use lightning::sign::{InMemorySigner, KeysManager};
849         use lightning::chain::transaction::OutPoint;
850         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
851         use lightning::{get_event_msg, get_event};
852         use lightning::ln::PaymentHash;
853         use lightning::ln::channelmanager;
854         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
855         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
856         use lightning::ln::functional_test_utils::*;
857         use lightning::ln::msgs::{ChannelMessageHandler, Init};
858         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
859         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
860         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
861         use lightning::routing::scoring::{ChannelUsage, Score};
862         use lightning::util::config::UserConfig;
863         use lightning::util::ser::Writeable;
864         use lightning::util::test_utils;
865         use lightning::util::persist::KVStorePersister;
866         use lightning_persister::FilesystemPersister;
867         use std::collections::VecDeque;
868         use std::{fs, env};
869         use std::path::PathBuf;
870         use std::sync::{Arc, Mutex};
871         use std::sync::mpsc::SyncSender;
872         use std::time::Duration;
873         use lightning_rapid_gossip_sync::RapidGossipSync;
874         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
875
876         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
877
878         #[derive(Clone, Hash, PartialEq, Eq)]
879         struct TestDescriptor{}
880         impl SocketDescriptor for TestDescriptor {
881                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
882                         0
883                 }
884
885                 fn disconnect_socket(&mut self) {}
886         }
887
888         #[cfg(not(c_bindings))]
889         type ChannelManager =
890                 channelmanager::ChannelManager<
891                         Arc<ChainMonitor>,
892                         Arc<test_utils::TestBroadcaster>,
893                         Arc<KeysManager>,
894                         Arc<KeysManager>,
895                         Arc<KeysManager>,
896                         Arc<test_utils::TestFeeEstimator>,
897                         Arc<DefaultRouter<
898                                 Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
899                                 Arc<test_utils::TestLogger>,
900                                 Arc<Mutex<TestScorer>>,
901                                 (),
902                                 TestScorer>
903                         >,
904                         Arc<test_utils::TestLogger>>;
905         #[cfg(c_bindings)]
906         type ChannelManager =
907                 channelmanager::ChannelManager<
908                         Arc<ChainMonitor>,
909                         Arc<test_utils::TestBroadcaster>,
910                         Arc<KeysManager>,
911                         Arc<KeysManager>,
912                         Arc<KeysManager>,
913                         Arc<test_utils::TestFeeEstimator>,
914                         Arc<DefaultRouter<
915                                 Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
916                                 Arc<test_utils::TestLogger>,
917                                 Arc<Mutex<TestScorer>>,
918                                 TestScorer>
919                         >,
920                         Arc<test_utils::TestLogger>>;
921
922         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
923
924         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
925         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
926
927         struct Node {
928                 node: Arc<ChannelManager>,
929                 p2p_gossip_sync: PGS,
930                 rapid_gossip_sync: RGS,
931                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
932                 chain_monitor: Arc<ChainMonitor>,
933                 persister: Arc<FilesystemPersister>,
934                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
935                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
936                 logger: Arc<test_utils::TestLogger>,
937                 best_block: BestBlock,
938                 scorer: Arc<Mutex<TestScorer>>,
939         }
940
941         impl Node {
942                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
943                         GossipSync::P2P(self.p2p_gossip_sync.clone())
944                 }
945
946                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
947                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
948                 }
949
950                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
951                         GossipSync::None
952                 }
953         }
954
955         impl Drop for Node {
956                 fn drop(&mut self) {
957                         let data_dir = self.persister.get_data_dir();
958                         match fs::remove_dir_all(data_dir.clone()) {
959                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
960                                 _ => {}
961                         }
962                 }
963         }
964
965         struct Persister {
966                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
967                 graph_persistence_notifier: Option<SyncSender<()>>,
968                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
969                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
970                 filesystem_persister: FilesystemPersister,
971         }
972
973         impl Persister {
974                 fn new(data_dir: String) -> Self {
975                         let filesystem_persister = FilesystemPersister::new(data_dir);
976                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
977                 }
978
979                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
980                         Self { graph_error: Some((error, message)), ..self }
981                 }
982
983                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
984                         Self { graph_persistence_notifier: Some(sender), ..self }
985                 }
986
987                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
988                         Self { manager_error: Some((error, message)), ..self }
989                 }
990
991                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
992                         Self { scorer_error: Some((error, message)), ..self }
993                 }
994         }
995
996         impl KVStorePersister for Persister {
997                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
998                         if key == "manager" {
999                                 if let Some((error, message)) = self.manager_error {
1000                                         return Err(std::io::Error::new(error, message))
1001                                 }
1002                         }
1003
1004                         if key == "network_graph" {
1005                                 if let Some(sender) = &self.graph_persistence_notifier {
1006                                         match sender.send(()) {
1007                                                 Ok(()) => {},
1008                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1009                                         }
1010                                 };
1011
1012                                 if let Some((error, message)) = self.graph_error {
1013                                         return Err(std::io::Error::new(error, message))
1014                                 }
1015                         }
1016
1017                         if key == "scorer" {
1018                                 if let Some((error, message)) = self.scorer_error {
1019                                         return Err(std::io::Error::new(error, message))
1020                                 }
1021                         }
1022
1023                         self.filesystem_persister.persist(key, object)
1024                 }
1025         }
1026
1027         struct TestScorer {
1028                 event_expectations: Option<VecDeque<TestResult>>,
1029         }
1030
1031         #[derive(Debug)]
1032         enum TestResult {
1033                 PaymentFailure { path: Path, short_channel_id: u64 },
1034                 PaymentSuccess { path: Path },
1035                 ProbeFailure { path: Path },
1036                 ProbeSuccess { path: Path },
1037         }
1038
1039         impl TestScorer {
1040                 fn new() -> Self {
1041                         Self { event_expectations: None }
1042                 }
1043
1044                 fn expect(&mut self, expectation: TestResult) {
1045                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1046                 }
1047         }
1048
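        // The tests below only check that the scorer is handed to the persister, not what bytes
        // end up on disk, so serialization can be a no-op.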
1049         impl lightning::util::ser::Writeable for TestScorer {
1050                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1051         }
1052
1053         impl Score for TestScorer {
1054                 #[cfg(not(c_bindings))]
1055                 type ScoreParams = ();
1056                 fn channel_penalty_msat(
1057                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage, _score_params: &Self::ScoreParams
1058                 ) -> u64 { unimplemented!(); }
1059
1060                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
1061                         if let Some(expectations) = &mut self.event_expectations {
1062                                 match expectations.pop_front().unwrap() {
1063                                         TestResult::PaymentFailure { path, short_channel_id } => {
1064                                                 assert_eq!(actual_path, &path);
1065                                                 assert_eq!(actual_short_channel_id, short_channel_id);
1066                                         },
1067                                         TestResult::PaymentSuccess { path } => {
1068                                                 panic!("Unexpected successful payment path: {:?}", path)
1069                                         },
1070                                         TestResult::ProbeFailure { path } => {
1071                                                 panic!("Unexpected probe failure: {:?}", path)
1072                                         },
1073                                         TestResult::ProbeSuccess { path } => {
1074                                                 panic!("Unexpected probe success: {:?}", path)
1075                                         }
1076                                 }
1077                         }
1078                 }
1079
1080                 fn payment_path_successful(&mut self, actual_path: &Path) {
1081                         if let Some(expectations) = &mut self.event_expectations {
1082                                 match expectations.pop_front().unwrap() {
1083                                         TestResult::PaymentFailure { path, .. } => {
1084                                                 panic!("Unexpected payment path failure: {:?}", path)
1085                                         },
1086                                         TestResult::PaymentSuccess { path } => {
1087                                                 assert_eq!(actual_path, &path);
1088                                         },
1089                                         TestResult::ProbeFailure { path } => {
1090                                                 panic!("Unexpected probe failure: {:?}", path)
1091                                         },
1092                                         TestResult::ProbeSuccess { path } => {
1093                                                 panic!("Unexpected probe success: {:?}", path)
1094                                         }
1095                                 }
1096                         }
1097                 }
1098
1099                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
1100                         if let Some(expectations) = &mut self.event_expectations {
1101                                 match expectations.pop_front().unwrap() {
1102                                         TestResult::PaymentFailure { path, .. } => {
1103                                                 panic!("Unexpected payment path failure: {:?}", path)
1104                                         },
1105                                         TestResult::PaymentSuccess { path } => {
1106                                                 panic!("Unexpected payment path success: {:?}", path)
1107                                         },
1108                                         TestResult::ProbeFailure { path } => {
1109                                                 assert_eq!(actual_path, &path);
1110                                         },
1111                                         TestResult::ProbeSuccess { path } => {
1112                                                 panic!("Unexpected probe success: {:?}", path)
1113                                         }
1114                                 }
1115                         }
1116                 }
1117                 fn probe_successful(&mut self, actual_path: &Path) {
1118                         if let Some(expectations) = &mut self.event_expectations {
1119                                 match expectations.pop_front().unwrap() {
1120                                         TestResult::PaymentFailure { path, .. } => {
1121                                                 panic!("Unexpected payment path failure: {:?}", path)
1122                                         },
1123                                         TestResult::PaymentSuccess { path } => {
1124                                                 panic!("Unexpected payment path success: {:?}", path)
1125                                         },
1126                                         TestResult::ProbeFailure { path } => {
1127                                                 panic!("Unexpected probe failure: {:?}", path)
1128                                         },
1129                                         TestResult::ProbeSuccess { path } => {
1130                                                 assert_eq!(actual_path, &path);
1131                                         }
1132                                 }
1133                         }
1134                 }
1135         }
1136
1137         impl Drop for TestScorer {
1138                 fn drop(&mut self) {
1139                         if std::thread::panicking() {
1140                                 return;
1141                         }
1142
1143                         if let Some(event_expectations) = &self.event_expectations {
1144                                 if !event_expectations.is_empty() {
1145                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1146                                 }
1147                         }
1148                 }
1149         }
1150
1151         fn get_full_filepath(filepath: String, filename: String) -> String {
1152                 let mut path = PathBuf::from(filepath);
1153                 path.push(filename);
1154                 path.to_str().unwrap().to_string()
1155         }
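
        // A minimal sanity check of the helper above (illustrative only; it asserts just the
        // platform-agnostic parts of the joined path).
        #[test]
        fn test_get_full_filepath_joins_components() {
                let full = get_full_filepath("some_dir".to_string(), "some_file".to_string());
                assert!(full.starts_with("some_dir"));
                assert!(full.ends_with("some_file"));
        }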
1156
1157         fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
1158                 let persist_temp_path = env::temp_dir().join(persist_dir);
1159                 let persist_dir = persist_temp_path.to_string_lossy().to_string();
1160                 let network = Network::Bitcoin;
1161                 let mut nodes = Vec::new();
1162                 for i in 0..num_nodes {
1163                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1164                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1165                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1166                         let genesis_block = genesis_block(network);
1167                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1168                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1169                         let seed = [i as u8; 32];
1170                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
1171                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
1172                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
1173                         let now = Duration::from_secs(genesis_block.header.time as u64);
1174                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1175                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1176                         let best_block = BestBlock::from_network(network);
1177                         let params = ChainParameters { network, best_block };
1178                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
1179                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1180                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1181                         let msg_handler = MessageHandler {
1182                                 chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
1183                                 route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
1184                                 onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
1185                         };
1186                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
1187                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1188                         nodes.push(node);
1189                 }
1190
1191                 for i in 0..num_nodes {
1192                         for j in (i+1)..num_nodes {
1193                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
1194                                         features: nodes[j].node.init_features(), networks: None, remote_network_address: None
1195                                 }, true).unwrap();
1196                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
1197                                         features: nodes[i].node.init_features(), networks: None, remote_network_address: None
1198                                 }, false).unwrap();
1199                         }
1200                 }
1201
1202                 (persist_dir, nodes)
1203         }
1204
1205         macro_rules! open_channel {
1206                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1207                         begin_open_channel!($node_a, $node_b, $channel_value);
1208                         let events = $node_a.node.get_and_clear_pending_events();
1209                         assert_eq!(events.len(), 1);
1210                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1211                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1212                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1213                         get_event!($node_b, Event::ChannelPending);
1214                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1215                         get_event!($node_a, Event::ChannelPending);
1216                         tx
1217                 }}
1218         }
1219
1220         macro_rules! begin_open_channel {
1221                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1222                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1223                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1224                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1225                 }}
1226         }
1227
1228         macro_rules! handle_funding_generation_ready {
1229                 ($event: expr, $channel_value: expr) => {{
1230                         match $event {
1231                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1232                                         assert_eq!(channel_value_satoshis, $channel_value);
1233                                         assert_eq!(user_channel_id, 42);
1234
1235                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1236                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1237                                         }]};
1238                                         (temporary_channel_id, tx)
1239                                 },
1240                                 _ => panic!("Unexpected event"),
1241                         }
1242                 }}
1243         }
1244
1245         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1246                 for i in 1..=depth {
1247                         let prev_blockhash = node.best_block.block_hash();
1248                         let height = node.best_block.height() + 1;
1249                         let header = create_dummy_header(prev_blockhash, height);
1250                         let txdata = vec![(0, tx)];
1251                         node.best_block = BestBlock::new(header.block_hash(), height);
1252                         match i {
1253                                 1 => {
1254                                         node.node.transactions_confirmed(&header, &txdata, height);
1255                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1256                                 },
1257                                 x if x == depth => {
1258                                         node.node.best_block_updated(&header, height);
1259                                         node.chain_monitor.best_block_updated(&header, height);
1260                                 },
1261                                 _ => {},
1262                         }
1263                 }
1264         }
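        // Confirming to `ANTI_REORG_DELAY` blocks means the transaction is treated as
        // irreversibly confirmed by the `ChainMonitor` and `ChannelManager`.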
1265         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1266                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1267         }
1268
1269         #[test]
1270         fn test_background_processor() {
1271                 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
1272                 // updates. Also test that when new updates are available, the manager signals that it needs
1273                 // re-persistence and is successfully re-persisted.
1274                 let (persist_dir, nodes) = create_nodes(2, "test_background_processor");
1275
1276                 // Go through the channel creation process so that each node has something to persist. Since
1277                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1278                 // avoid a race with processing events.
1279                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1280
1281                 // Start a background processor watching node 0, persisting its data via the test `Persister`.
1282                 let data_dir = nodes[0].persister.get_data_dir();
1283                 let persister = Arc::new(Persister::new(data_dir));
1284                 let event_handler = |_: _| {};
1285                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1286
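                // `check_persisted_data` busy-waits until the bytes on disk match the object's
                // current serialization, since persistence happens asynchronously on the
                // background processor's thread.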
1287                 macro_rules! check_persisted_data {
1288                         ($node: expr, $filepath: expr) => {
1289                                 let mut expected_bytes = Vec::new();
1290                                 loop {
1291                                         expected_bytes.clear();
1292                                         match $node.write(&mut expected_bytes) {
1293                                                 Ok(()) => {
1294                                                         match std::fs::read($filepath) {
1295                                                                 Ok(bytes) => {
1296                                                                         if bytes == expected_bytes {
1297                                                                                 break
1298                                                                         } else {
1299                                                                                 continue
1300                                                                         }
1301                                                                 },
1302                                                                 Err(_) => continue
1303                                                         }
1304                                                 },
1305                                                 Err(e) => panic!("Unexpected error: {}", e)
1306                                         }
1307                                 }
1308                         }
1309                 }
1310
1311                 // Check that the initial channel manager data is persisted as expected.
1312                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
1313                 check_persisted_data!(nodes[0].node, filepath.clone());
1314
1315                 loop {
1316                         if !nodes[0].node.get_persistence_condvar_value() { break }
1317                 }
1318
1319                 // Force-close the channel.
1320                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1321
1322                 // Check that the force-close updates are persisted.
1323                 check_persisted_data!(nodes[0].node, filepath.clone());
1324                 loop {
1325                         if !nodes[0].node.get_persistence_condvar_value() { break }
1326                 }
1327
1328                 // Check network graph is persisted
1329                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
1330                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1331
1332                 // Check scorer is persisted
1333                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
1334                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1335
1336                 if !std::thread::panicking() {
1337                         bg_processor.stop().unwrap();
1338                 }
1339         }
1340
1341         #[test]
1342         fn test_timer_tick_called() {
1343                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1344                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1345                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1346                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1347                 let data_dir = nodes[0].persister.get_data_dir();
1348                 let persister = Arc::new(Persister::new(data_dir));
1349                 let event_handler = |_: _| {};
1350                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1351                 loop {
1352                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1353                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1354                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1355                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1356                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1357                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1358                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1359                                 break
1360                         }
1361                 }
1362
1363                 if !std::thread::panicking() {
1364                         bg_processor.stop().unwrap();
1365                 }
1366         }
1367
1368         #[test]
1369         fn test_channel_manager_persist_error() {
1370                 // Test that an error encountered during manager persistence is surfaced when the background processor is joined.
1371                 let (_, nodes) = create_nodes(2, "test_persist_error");
1372                 open_channel!(nodes[0], nodes[1], 100000);
1373
1374                 let data_dir = nodes[0].persister.get_data_dir();
1375                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1376                 let event_handler = |_: _| {};
1377                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1378                 match bg_processor.join() {
1379                         Ok(_) => panic!("Expected error persisting manager"),
1380                         Err(e) => {
1381                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1382                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1383                         },
1384                 }
1385         }
1386
1387         #[tokio::test]
1388         #[cfg(feature = "futures")]
1389         async fn test_channel_manager_persist_error_async() {
1390                 // Test that an error encountered during manager persistence is returned by the async processing future.
1391                 let (_, nodes) = create_nodes(2, "test_persist_error_sync");
1392                 open_channel!(nodes[0], nodes[1], 100000);
1393
1394                 let data_dir = nodes[0].persister.get_data_dir();
1395                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1396
1397                 let bp_future = super::process_events_async(
1398                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1399                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1400                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1401                                 Box::pin(async move {
1402                                         tokio::time::sleep(dur).await;
1403                                         false // Never exit
1404                                 })
1405                         }, false,
1406                 );
1407                 match bp_future.await {
1408                         Ok(_) => panic!("Expected error persisting manager"),
1409                         Err(e) => {
1410                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1411                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1412                         },
1413                 }
1414         }
1415
1416         #[test]
1417         fn test_network_graph_persist_error() {
1418                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1419                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1420                 let data_dir = nodes[0].persister.get_data_dir();
1421                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1422                 let event_handler = |_: _| {};
1423                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1424
1425                 match bg_processor.stop() {
1426                         Ok(_) => panic!("Expected error persisting network graph"),
1427                         Err(e) => {
1428                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1429                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1430                         },
1431                 }
1432         }
1433
1434         #[test]
1435         fn test_scorer_persist_error() {
1436                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1437                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1438                 let data_dir = nodes[0].persister.get_data_dir();
1439                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1440                 let event_handler = |_: _| {};
1441                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1442
1443                 match bg_processor.stop() {
1444                         Ok(_) => panic!("Expected error persisting scorer"),
1445                         Err(e) => {
1446                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1447                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1448                         },
1449                 }
1450         }
1451
1452         #[test]
1453         fn test_background_event_handling() {
1454                 let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
1455                 let channel_value = 100000;
1456                 let data_dir = nodes[0].persister.get_data_dir();
1457                 let persister = Arc::new(Persister::new(data_dir.clone()));
1458
1459                 // Set up a background event handler for FundingGenerationReady and ChannelPending events.
1460                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1461                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1462                 let event_handler = move |event: Event| match event {
1463                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1464                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1465                         Event::ChannelReady { .. } => {},
1466                         _ => panic!("Unexpected event: {:?}", event),
1467                 };
1468
1469                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1470
1471                 // Open a channel and check that the FundingGenerationReady event was handled.
1472                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1473                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1474                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1475                         .expect("FundingGenerationReady not handled within deadline");
1476                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1477                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1478                 get_event!(nodes[1], Event::ChannelPending);
1479                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1480                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1481                         .expect("ChannelPending not handled within deadline");
1482
1483                 // Confirm the funding transaction.
1484                 confirm_transaction(&mut nodes[0], &funding_tx);
1485                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1486                 confirm_transaction(&mut nodes[1], &funding_tx);
1487                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1488                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1489                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1490                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1491                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1492
1493                 if !std::thread::panicking() {
1494                         bg_processor.stop().unwrap();
1495                 }
1496
1497                 // Set up a background event handler for SpendableOutputs events.
1498                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1499                 let event_handler = move |event: Event| match event {
1500                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1501                         Event::ChannelReady { .. } => {},
1502                         Event::ChannelClosed { .. } => {},
1503                         _ => panic!("Unexpected event: {:?}", event),
1504                 };
1505                 let persister = Arc::new(Persister::new(data_dir));
1506                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1507
1508                 // Force close the channel and check that the SpendableOutputs event was handled.
1509                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1510                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1511                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1512
1513                 let event = receiver
1514                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1515                         .expect("Events not handled within deadline");
1516                 match event {
1517                         Event::SpendableOutputs { .. } => {},
1518                         _ => panic!("Unexpected event: {:?}", event),
1519                 }
1520
1521                 if !std::thread::panicking() {
1522                         bg_processor.stop().unwrap();
1523                 }
1524         }
1525
1526         #[test]
1527         fn test_scorer_persistence() {
1528                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1529                 let data_dir = nodes[0].persister.get_data_dir();
1530                 let persister = Arc::new(Persister::new(data_dir));
1531                 let event_handler = |_: _| {};
1532                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1533
1534                 loop {
1535                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1536                         let expected_log = "Persisting scorer".to_string();
1537                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1538                                 break
1539                         }
1540                 }
1541
1542                 if !std::thread::panicking() {
1543                         bg_processor.stop().unwrap();
1544                 }
1545         }
1546
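        // Shared body for the sync and async tests below: `$receive` blocks until the network
        // graph persistence notifier fires (or a deadline passes), while `$sleep` yields between
        // iterations of the polling loop.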
1547         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1548                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1549                         let features = ChannelFeatures::empty();
1550                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1551                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1552                         ).expect("Failed to update channel from partial announcement");
1553                         let original_graph_description = $nodes[0].network_graph.to_string();
1554                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1555                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1556
1557                         loop {
1558                                 $sleep;
1559                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1560                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1561                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1562                                         .unwrap_or(&0) > 1
1563                                 {
1564                                         // Wait until the loop has gone around at least twice.
1565                                         break
1566                                 }
1567                         }
1568
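                        // A raw rapid-gossip-sync snapshot (note the "LDK" prefix) announcing two channels.
                        // It is applied below with roughly the timestamp it was generated at, so the two new
                        // channels are fresh while the partially-announced channel above is already stale.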
1569                         let initialization_input = vec![
1570                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1571                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1572                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1573                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1574                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1575                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1576                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1577                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1578                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1579                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1580                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1581                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1582                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1583                         ];
1584                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1585
1586                         // this should have added two channels and pruned the previous one.
1587                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1588
1589                         $receive.expect("Network graph not pruned within deadline");
1590
1591                         // all channels should now be pruned
1592                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1593                 }
1594         }
1595
1596         #[test]
1597         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1598                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1599
1600                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1601                 let data_dir = nodes[0].persister.get_data_dir();
1602                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1603
1604                 let event_handler = |_: _| {};
1605                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1606
1607                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1608                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1609                         std::thread::sleep(Duration::from_millis(1)));
1610
1611                 background_processor.stop().unwrap();
1612         }
1613
1614         #[tokio::test]
1615         #[cfg(feature = "futures")]
1616         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1617                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1618
1619                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
1620                 let data_dir = nodes[0].persister.get_data_dir();
1621                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1622
1623                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1624                 let bp_future = super::process_events_async(
1625                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1626                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1627                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1628                                 let mut exit_receiver = exit_receiver.clone();
1629                                 Box::pin(async move {
1630                                         tokio::select! {
1631                                                 _ = tokio::time::sleep(dur) => false,
1632                                                 _ = exit_receiver.changed() => true,
1633                                         }
1634                                 })
1635                         }, false,
1636                 );
1637
1638                 let t1 = tokio::spawn(bp_future);
1639                 let t2 = tokio::spawn(async move {
1640                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1641                                 let mut i = 0;
1642                                 loop {
1643                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1644                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1645                                         assert!(i < 5);
1646                                         i += 1;
1647                                 }
1648                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1649                         exit_sender.send(()).unwrap();
1650                 });
1651                 let (r1, r2) = tokio::join!(t1, t2);
1652                 r1.unwrap().unwrap();
1653                 r2.unwrap()
1654         }
1655
1656         macro_rules! do_test_payment_path_scoring {
1657                 ($nodes: expr, $receive: expr) => {
1658                         // Ensure that we update the scorer when relevant events are processed. In this case, we ensure
1659                         // that we update the scorer upon a payment path succeeding (note that the channel must be
1660                         // public or else we won't score it).
1661                         // The caller must have an event handler for payment and probe events hooked up to a
1662                         // running background processor so that the events pushed below are delivered and scored.
1663                         let scored_scid = 4242;
1664                         let secp_ctx = Secp256k1::new();
1665                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1666                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1667
1668                         let path = Path { hops: vec![RouteHop {
1669                                 pubkey: node_1_id,
1670                                 node_features: NodeFeatures::empty(),
1671                                 short_channel_id: scored_scid,
1672                                 channel_features: ChannelFeatures::empty(),
1673                                 fee_msat: 0,
1674                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1675                         }], blinded_tail: None };
1676
1677                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1678                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1679                                 payment_id: None,
1680                                 payment_hash: PaymentHash([42; 32]),
1681                                 payment_failed_permanently: false,
1682                                 failure: PathFailure::OnPath { network_update: None },
1683                                 path: path.clone(),
1684                                 short_channel_id: Some(scored_scid),
1685                         });
1686                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1687                         match event {
1688                                 Event::PaymentPathFailed { .. } => {},
1689                                 _ => panic!("Unexpected event"),
1690                         }
1691
1692                         // Ensure we'll score payments that were explicitly failed back by the destination as
1693                         // ProbeSuccess.
1694                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1695                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1696                                 payment_id: None,
1697                                 payment_hash: PaymentHash([42; 32]),
1698                                 payment_failed_permanently: true,
1699                                 failure: PathFailure::OnPath { network_update: None },
1700                                 path: path.clone(),
1701                                 short_channel_id: None,
1702                         });
1703                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1704                         match event {
1705                                 Event::PaymentPathFailed { .. } => {},
1706                                 _ => panic!("Unexpected event"),
1707                         }
1708
1709                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1710                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1711                                 payment_id: PaymentId([42; 32]),
1712                                 payment_hash: None,
1713                                 path: path.clone(),
1714                         });
1715                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1716                         match event {
1717                                 Event::PaymentPathSuccessful { .. } => {},
1718                                 _ => panic!("Unexpected event"),
1719                         }
1720
1721                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1722                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1723                                 payment_id: PaymentId([42; 32]),
1724                                 payment_hash: PaymentHash([42; 32]),
1725                                 path: path.clone(),
1726                         });
1727                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1728                         match event {
1729                                 Event::ProbeSuccessful  { .. } => {},
1730                                 _ => panic!("Unexpected event"),
1731                         }
1732
1733                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1734                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1735                                 payment_id: PaymentId([42; 32]),
1736                                 payment_hash: PaymentHash([42; 32]),
1737                                 path,
1738                                 short_channel_id: Some(scored_scid),
1739                         });
1740                         let event = $receive.expect("ProbeFailure not handled within deadline");
1741                         match event {
1742                                 Event::ProbeFailed { .. } => {},
1743                                 _ => panic!("Unexpected event"),
1744                         }
1745                 }
1746         }
1747
1748         #[test]
1749         fn test_payment_path_scoring() {
1750                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1751                 let event_handler = move |event: Event| match event {
1752                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1753                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1754                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1755                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1756                         _ => panic!("Unexpected event: {:?}", event),
1757                 };
1758
1759                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1760                 let data_dir = nodes[0].persister.get_data_dir();
1761                 let persister = Arc::new(Persister::new(data_dir));
1762                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1763
1764                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1765
1766                 if !std::thread::panicking() {
1767                         bg_processor.stop().unwrap();
1768                 }
1769
1770                 let log_entries = nodes[0].logger.lines.lock().unwrap();
1771                 let expected_log = "Persisting scorer after update".to_string();
1772                 assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1773         }
1774
1775         #[tokio::test]
1776         #[cfg(feature = "futures")]
1777         async fn test_payment_path_scoring_async() {
1778                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1779                 let event_handler = move |event: Event| {
1780                         let sender_ref = sender.clone();
1781                         async move {
1782                                 match event {
1783                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1784                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1785                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1786                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1787                                         _ => panic!("Unexpected event: {:?}", event),
1788                                 }
1789                         }
1790                 };
1791
1792                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1793                 let data_dir = nodes[0].persister.get_data_dir();
1794                 let persister = Arc::new(Persister::new(data_dir));
1795
1796                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1797
1798                 let bp_future = super::process_events_async(
1799                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1800                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1801                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1802                                 let mut exit_receiver = exit_receiver.clone();
1803                                 Box::pin(async move {
1804                                         tokio::select! {
1805                                                 _ = tokio::time::sleep(dur) => false,
1806                                                 _ = exit_receiver.changed() => true,
1807                                         }
1808                                 })
1809                         }, false,
1810                 );
1811                 let t1 = tokio::spawn(bp_future);
1812                 let t2 = tokio::spawn(async move {
1813                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1814                         exit_sender.send(()).unwrap();
1815
1816                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1817                         let expected_log = "Persisting scorer after update".to_string();
1818                         assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
1819                 });
1820
1821                 let (r1, r2) = tokio::join!(t1, t2);
1822                 r1.unwrap().unwrap();
1823                 r2.unwrap()
1824         }
1825 }