Merge pull request #2253 from dunxen/2023-05-removeoptionalfield
rust-lightning / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
68 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon, as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
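///
/// # Example
///
/// A minimal usage sketch (illustrative, not compiled): it assumes `persister`, `event_handler`,
/// `chain_monitor`, `channel_manager`, `p2p_gossip_sync`, `peer_manager`, `logger` and `scorer`
/// have already been constructed elsewhere, typically wrapped in `Arc`s.
///
/// ```ignore
/// let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_gossip_sync));
/// let bg_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager, gossip_sync,
///     peer_manager, logger, Some(scorer),
/// );
/// // ... run the node ...
/// // On shutdown, stop the background thread and surface any persistence error.
/// bg_processor.stop().expect("Failed to persist on shutdown");
/// ```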
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take a lot longer without compiler optimisations, so debug builds use a
99 /// longer ping timer to allow for this; slower devices will still be disconnected if the
100 /// timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(not(test))]
120 const REBROADCAST_TIMER: u64 = 30;
121 #[cfg(test)]
122 const REBROADCAST_TIMER: u64 = 1;
123
124 #[cfg(feature = "futures")]
125 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
126 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
127 #[cfg(feature = "futures")]
128 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
129         min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
130
131 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
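///
/// Construction typically goes through the provided helper constructors rather than the variants
/// directly. An illustrative (not compiled) sketch:
///
/// ```ignore
/// // With gossip over the lightning P2P network:
/// let gossip = GossipSync::p2p(Arc::clone(&p2p_gossip_sync));
/// // With a rapid gossip sync server:
/// let gossip = GossipSync::rapid(Arc::clone(&rapid_gossip_sync));
/// // Or, when no gossip source is configured:
/// let gossip = GossipSync::none();
/// ```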
132 pub enum GossipSync<
133         P: Deref<Target = P2PGossipSync<G, U, L>>,
134         R: Deref<Target = RapidGossipSync<G, L>>,
135         G: Deref<Target = NetworkGraph<L>>,
136         U: Deref,
137         L: Deref,
138 >
139 where U::Target: UtxoLookup, L::Target: Logger {
140         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
141         P2P(P),
142         /// Rapid gossip sync from a trusted server.
143         Rapid(R),
144         /// No gossip sync.
145         None,
146 }
147
148 impl<
149         P: Deref<Target = P2PGossipSync<G, U, L>>,
150         R: Deref<Target = RapidGossipSync<G, L>>,
151         G: Deref<Target = NetworkGraph<L>>,
152         U: Deref,
153         L: Deref,
154 > GossipSync<P, R, G, U, L>
155 where U::Target: UtxoLookup, L::Target: Logger {
156         fn network_graph(&self) -> Option<&G> {
157                 match self {
158                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
159                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::None => None,
161                 }
162         }
163
164         fn prunable_network_graph(&self) -> Option<&G> {
165                 match self {
166                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
167                         GossipSync::Rapid(gossip_sync) => {
168                                 if gossip_sync.is_initial_sync_complete() {
169                                         Some(gossip_sync.network_graph())
170                                 } else {
171                                         None
172                                 }
173                         },
174                         GossipSync::None => None,
175                 }
176         }
177 }
178
179 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
180 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
181         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
182 where
183         U::Target: UtxoLookup,
184         L::Target: Logger,
185 {
186         /// Initializes a new [`GossipSync::P2P`] variant.
187         pub fn p2p(gossip_sync: P) -> Self {
188                 GossipSync::P2P(gossip_sync)
189         }
190 }
191
192 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
193 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
194         GossipSync<
195                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
196                 R,
197                 G,
198                 &'a (dyn UtxoLookup + Send + Sync),
199                 L,
200         >
201 where
202         L::Target: Logger,
203 {
204         /// Initializes a new [`GossipSync::Rapid`] variant.
205         pub fn rapid(gossip_sync: R) -> Self {
206                 GossipSync::Rapid(gossip_sync)
207         }
208 }
209
210 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
211 impl<'a, L: Deref>
212         GossipSync<
213                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
214                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
215                 &'a NetworkGraph<L>,
216                 &'a (dyn UtxoLookup + Send + Sync),
217                 L,
218         >
219 where
220         L::Target: Logger,
221 {
222         /// Initializes a new [`GossipSync::None`] variant.
223         pub fn none() -> Self {
224                 GossipSync::None
225         }
226 }
227
228 fn handle_network_graph_update<L: Deref>(
229         network_graph: &NetworkGraph<L>, event: &Event
230 ) where L::Target: Logger {
231         if let Event::PaymentPathFailed {
232                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
233         {
234                 network_graph.handle_network_update(upd);
235         }
236 }
237
238 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
239         scorer: &'a S, event: &Event
240 ) {
241         let mut score = scorer.lock();
242         match event {
243                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
244                         score.payment_path_failed(path, *scid);
245                 },
246                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
247                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
248                         // because the payment made it all the way to the destination with sufficient liquidity.
249                         score.probe_successful(path);
250                 },
251                 Event::PaymentPathSuccessful { path, .. } => {
252                         score.payment_path_successful(path);
253                 },
254                 Event::ProbeSuccessful { path, .. } => {
255                         score.probe_successful(path);
256                 },
257                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
258                         score.probe_failed(path, *scid);
259                 },
260                 _ => {},
261         }
262 }
263
264 macro_rules! define_run_body {
265         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
266          $channel_manager: ident, $process_channel_manager_events: expr,
267          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
268          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
269          $check_slow_await: expr)
270         => { {
271                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
272                 $channel_manager.timer_tick_occurred();
273                 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
274                 $chain_monitor.rebroadcast_pending_claims();
275
276                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
277                 let mut last_ping_call = $get_timer(PING_TIMER);
278                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
279                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
280                 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
281                 let mut have_pruned = false;
282
283                 loop {
284                         $process_channel_manager_events;
285                         $process_chain_monitor_events;
286
287                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
288                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
289                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
290                         // without running the normal event processing above and handing events to users.
291                         //
292                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
293                         // processing a message effectively at any point during this loop. In order to
294                         // minimize the time between such processing completing and persisting the updated
295                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
296                         // generally, and as a fallback place such blocking only immediately before
297                         // persistence.
298                         $peer_manager.process_events();
299
300                         // Exit the loop if the background processor was requested to stop.
301                         if $loop_exit_check {
302                                 log_trace!($logger, "Terminating background processor.");
303                                 break;
304                         }
305
306                         // We wait up to 100ms, but track how long it takes to detect being put to sleep,
307                         // see `await_start`'s use below.
308                         let mut await_start = None;
309                         if $check_slow_await { await_start = Some($get_timer(1)); }
310                         let updates_available = $await;
311                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
312
313                         // Exit the loop if the background processor was requested to stop.
314                         if $loop_exit_check {
315                                 log_trace!($logger, "Terminating background processor.");
316                                 break;
317                         }
318
319                         if updates_available {
320                                 log_trace!($logger, "Persisting ChannelManager...");
321                                 $persister.persist_manager(&*$channel_manager)?;
322                                 log_trace!($logger, "Done persisting ChannelManager.");
323                         }
324                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
325                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
326                                 $channel_manager.timer_tick_occurred();
327                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
328                         }
329                         if await_slow {
330                                 // On various platforms, we may be starved of CPU cycles for several reasons.
331                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
332                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
333                                 // may not get any cycles.
334                                 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
335				// full second, at which point we assume sockets may have been killed (they appear
336				// to be, at least on some platforms, even if the pause only lasted a second).
337                                 // Note that we have to take care to not get here just because user event
338                                 // processing was slow at the top of the loop. For example, the sample client
339                                 // may call Bitcoin Core RPCs during event handling, which very often takes
340                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
341                                 // peers.
342                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
343                                 $peer_manager.disconnect_all_peers();
344                                 last_ping_call = $get_timer(PING_TIMER);
345                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
346                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
347                                 $peer_manager.timer_tick_occurred();
348                                 last_ping_call = $get_timer(PING_TIMER);
349                         }
350
351                         // Note that we want to run a graph prune once not long after startup before
352                         // falling back to our usual hourly prunes. This avoids short-lived clients never
353                         // pruning their network graph. We run once 60 seconds after startup before
354                         // continuing our normal cadence.
355                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
356                         if $timer_elapsed(&mut last_prune_call, prune_timer) {
357                                 // The network graph must not be pruned while rapid sync completion is pending
358                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
359                                         #[cfg(feature = "std")] {
360                                                 log_trace!($logger, "Pruning and persisting network graph.");
361                                                 network_graph.remove_stale_channels_and_tracking();
362                                         }
363                                         #[cfg(not(feature = "std"))] {
364                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
365                                                 log_trace!($logger, "Persisting network graph.");
366                                         }
367
368                                         if let Err(e) = $persister.persist_graph(network_graph) {
369                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
370                                         }
371
372                                         have_pruned = true;
373                                 }
374                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
375                                 last_prune_call = $get_timer(prune_timer);
376                         }
377
378                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
379                                 if let Some(ref scorer) = $scorer {
380                                         log_trace!($logger, "Persisting scorer");
381                                         if let Err(e) = $persister.persist_scorer(&scorer) {
382                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
383                                         }
384                                 }
385                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
386                         }
387
388                         if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
389                                 log_trace!($logger, "Rebroadcasting monitor's pending claims");
390                                 $chain_monitor.rebroadcast_pending_claims();
391                                 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
392                         }
393                 }
394
395                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
396                 // some races where users quit while channel updates were in-flight, with
397                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
398                 $persister.persist_manager(&*$channel_manager)?;
399
400                 // Persist Scorer on exit
401                 if let Some(ref scorer) = $scorer {
402                         $persister.persist_scorer(&scorer)?;
403                 }
404
405                 // Persist NetworkGraph on exit
406                 if let Some(network_graph) = $gossip_sync.network_graph() {
407                         $persister.persist_graph(network_graph)?;
408                 }
409
410                 Ok(())
411         } }
412 }
413
414 #[cfg(feature = "futures")]
415 pub(crate) mod futures_util {
416         use core::future::Future;
417         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
418         use core::pin::Pin;
419         use core::marker::Unpin;
420         pub(crate) struct Selector<
421                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
422         > {
423                 pub a: A,
424                 pub b: B,
425                 pub c: C,
426         }
427         pub(crate) enum SelectorOutput {
428                 A, B, C(bool),
429         }
430
431         impl<
432                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
433         > Future for Selector<A, B, C> {
434                 type Output = SelectorOutput;
435                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
436                         match Pin::new(&mut self.a).poll(ctx) {
437                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
438                                 Poll::Pending => {},
439                         }
440                         match Pin::new(&mut self.b).poll(ctx) {
441                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
442                                 Poll::Pending => {},
443                         }
444                         match Pin::new(&mut self.c).poll(ctx) {
445                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
446                                 Poll::Pending => {},
447                         }
448                         Poll::Pending
449                 }
450         }
451
452         // If we want to poll a future without an async context to figure out if it has completed or
453         // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
454         // but sadly there's a good bit of boilerplate here.
455         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
456         fn dummy_waker_action(_: *const ()) { }
457
458         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
459                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
460         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
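	// Illustrative sketch (assumes `fut` is some `Unpin` future): this is how `dummy_waker` is
	// used by `process_events_async` below to poll a future a single time without an executor:
	//
	//     let waker = dummy_waker();
	//     let mut ctx = core::task::Context::from_waker(&waker);
	//     let _ = core::pin::Pin::new(&mut fut).poll(&mut ctx);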
461 }
462 #[cfg(feature = "futures")]
463 use futures_util::{Selector, SelectorOutput, dummy_waker};
464 #[cfg(feature = "futures")]
465 use core::task;
466
467 /// Processes background events in a future.
468 ///
469 /// `sleeper` should return a future which completes in the given amount of time and returns a
470 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
471 /// future which outputs `true`, the loop will exit and this function's future will complete.
472 /// The `sleeper` future is free to return early after it has triggered the exit condition.
473 ///
474 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
475 ///
476 /// Requires the `futures` feature. Note that while this method is available without the `std`
477 /// feature, using it that way will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
478 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
479 /// manually at regular intervals.
480 ///
481 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
482 /// mobile device, where we may need to check for interruption of the application regularly. If you
483 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
484 /// are hundreds or thousands of simultaneous process calls running.
485 ///
486 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
487 /// could set up `process_events_async` like this:
488 /// ```
489 /// # struct MyPersister {}
490 /// # impl lightning::util::persist::KVStorePersister for MyPersister {
491 /// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
492 /// # }
493 /// # struct MyEventHandler {}
494 /// # impl MyEventHandler {
495 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
496 /// # }
497 /// # #[derive(Eq, PartialEq, Clone, Hash)]
498 /// # struct MySocketDescriptor {}
499 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
500 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
501 /// #     fn disconnect_socket(&mut self) {}
502 /// # }
503 /// # use std::sync::{Arc, Mutex};
504 /// # use std::sync::atomic::{AtomicBool, Ordering};
505 /// # use lightning_background_processor::{process_events_async, GossipSync};
506 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
507 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
508 /// # type MyNodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
509 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
510 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
511 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
512 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
513 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
514 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
515 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
516 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
517 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
518 ///
519 /// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
520 ///     let background_persister = Arc::clone(&my_persister);
521 ///     let background_event_handler = Arc::clone(&my_event_handler);
522 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
523 ///     let background_chan_man = Arc::clone(&my_channel_manager);
524 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
525 ///     let background_peer_man = Arc::clone(&my_peer_manager);
526 ///     let background_logger = Arc::clone(&my_logger);
527 ///     let background_scorer = Arc::clone(&my_scorer);
528 ///
529 ///     // Setup the sleeper.
530 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
531 ///
532 ///     let sleeper = move |d| {
533 ///             let mut receiver = stop_receiver.clone();
534 ///             Box::pin(async move {
535 ///                     tokio::select!{
536 ///                             _ = tokio::time::sleep(d) => false,
537 ///                             _ = receiver.changed() => true,
538 ///                     }
539 ///             })
540 ///     };
541 ///
542 ///     let mobile_interruptable_platform = false;
543 ///
544 ///     let handle = tokio::spawn(async move {
545 ///             process_events_async(
546 ///                     background_persister,
547 ///                     |e| background_event_handler.handle_event(e),
548 ///                     background_chain_mon,
549 ///                     background_chan_man,
550 ///                     background_gossip_sync,
551 ///                     background_peer_man,
552 ///                     background_logger,
553 ///                     Some(background_scorer),
554 ///                     sleeper,
555 ///                     mobile_interruptable_platform,
556 ///                     )
557 ///                     .await
558 ///                     .expect("Failed to process events");
559 ///     });
560 ///
561 ///     // Stop the background processing.
562 ///     stop_sender.send(()).unwrap();
563 ///     handle.await.unwrap();
564 ///     # }
565 /// ```
566 #[cfg(feature = "futures")]
567 pub async fn process_events_async<
568         'a,
569         UL: 'static + Deref + Send + Sync,
570         CF: 'static + Deref + Send + Sync,
571         CW: 'static + Deref + Send + Sync,
572         T: 'static + Deref + Send + Sync,
573         ES: 'static + Deref + Send + Sync,
574         NS: 'static + Deref + Send + Sync,
575         SP: 'static + Deref + Send + Sync,
576         F: 'static + Deref + Send + Sync,
577         R: 'static + Deref + Send + Sync,
578         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
579         L: 'static + Deref + Send + Sync,
580         P: 'static + Deref + Send + Sync,
581         Descriptor: 'static + SocketDescriptor + Send + Sync,
582         CMH: 'static + Deref + Send + Sync,
583         RMH: 'static + Deref + Send + Sync,
584         OMH: 'static + Deref + Send + Sync,
585         EventHandlerFuture: core::future::Future<Output = ()>,
586         EventHandler: Fn(Event) -> EventHandlerFuture,
587         PS: 'static + Deref + Send,
588         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
589         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
590         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
591         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
592         UMH: 'static + Deref + Send + Sync,
593         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
594         S: 'static + Deref<Target = SC> + Send + Sync,
595         SC: for<'b> WriteableScore<'b>,
596         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
597         Sleeper: Fn(Duration) -> SleepFuture
598 >(
599         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
600         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
601         sleeper: Sleeper, mobile_interruptable_platform: bool,
602 ) -> Result<(), lightning::io::Error>
603 where
604         UL::Target: 'static + UtxoLookup,
605         CF::Target: 'static + chain::Filter,
606         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
607         T::Target: 'static + BroadcasterInterface,
608         ES::Target: 'static + EntropySource,
609         NS::Target: 'static + NodeSigner,
610         SP::Target: 'static + SignerProvider,
611         F::Target: 'static + FeeEstimator,
612         R::Target: 'static + Router,
613         L::Target: 'static + Logger,
614         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
615         CMH::Target: 'static + ChannelMessageHandler,
616         OMH::Target: 'static + OnionMessageHandler,
617         RMH::Target: 'static + RoutingMessageHandler,
618         UMH::Target: 'static + CustomMessageHandler,
619         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
620 {
621         let mut should_break = false;
622         let async_event_handler = |event| {
623                 let network_graph = gossip_sync.network_graph();
624                 let event_handler = &event_handler;
625                 let scorer = &scorer;
626                 async move {
627                         if let Some(network_graph) = network_graph {
628                                 handle_network_graph_update(network_graph, &event)
629                         }
630                         if let Some(ref scorer) = scorer {
631                                 update_scorer(scorer, &event);
632                         }
633                         event_handler(event).await;
634                 }
635         };
636         define_run_body!(persister,
637                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
638                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
639                 gossip_sync, peer_manager, logger, scorer, should_break, {
640                         let fut = Selector {
641                                 a: channel_manager.get_persistable_update_future(),
642                                 b: chain_monitor.get_update_future(),
643                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
644                         };
645                         match fut.await {
646                                 SelectorOutput::A => true,
647                                 SelectorOutput::B => false,
648                                 SelectorOutput::C(exit) => {
649                                         should_break = exit;
650                                         false
651                                 }
652                         }
653                 }, |t| sleeper(Duration::from_secs(t)),
654                 |fut: &mut SleepFuture, _| {
655                         let mut waker = dummy_waker();
656                         let mut ctx = task::Context::from_waker(&mut waker);
657                         match core::pin::Pin::new(fut).poll(&mut ctx) {
658                                 task::Poll::Ready(exit) => { should_break = exit; true },
659                                 task::Poll::Pending => false,
660                         }
661                 }, mobile_interruptable_platform)
662 }
663
664 #[cfg(feature = "std")]
665 impl BackgroundProcessor {
666         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
667         /// documentation].
668         ///
669         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
670         /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
671         /// either [`join`] or [`stop`].
672         ///
673         /// # Data Persistence
674         ///
675         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
676         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
677         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
678         /// provided implementation.
679         ///
680         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
681         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
682         /// See the `lightning-persister` crate for LDK's provided implementation.
683         ///
684         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
685         /// error or call [`join`] and handle any error that may arise. For the latter case,
686         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
687         ///
688         /// # Event Handling
689         ///
690         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
691         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
692         /// functionality implemented by other handlers.
693	/// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
694         ///
695         /// # Rapid Gossip Sync
696         ///
697         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
698         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
699         /// until the [`RapidGossipSync`] instance completes its first sync.
700         ///
701         /// [top-level documentation]: BackgroundProcessor
702         /// [`join`]: Self::join
703         /// [`stop`]: Self::stop
704         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
705         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
706         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
707         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
708         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
709         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
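	///
	/// # Handling Persistence Errors
	///
	/// An illustrative, non-compiled sketch of the "call [`join`] and restart" approach described
	/// above; `start_bg_processor()` is a hypothetical helper standing in for a call to
	/// [`BackgroundProcessor::start`] with your own components:
	///
	/// ```ignore
	/// loop {
	///     let bg_processor = start_bg_processor();
	///     match bg_processor.join() {
	///         Ok(()) => break,
	///         Err(e) => {
	///             // Log `e` and address the underlying cause (disk space, permissions, ...)
	///             // before looping around to start a fresh BackgroundProcessor.
	///         }
	///     }
	/// }
	/// ```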
710         pub fn start<
711                 'a,
712                 UL: 'static + Deref + Send + Sync,
713                 CF: 'static + Deref + Send + Sync,
714                 CW: 'static + Deref + Send + Sync,
715                 T: 'static + Deref + Send + Sync,
716                 ES: 'static + Deref + Send + Sync,
717                 NS: 'static + Deref + Send + Sync,
718                 SP: 'static + Deref + Send + Sync,
719                 F: 'static + Deref + Send + Sync,
720                 R: 'static + Deref + Send + Sync,
721                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
722                 L: 'static + Deref + Send + Sync,
723                 P: 'static + Deref + Send + Sync,
724                 Descriptor: 'static + SocketDescriptor + Send + Sync,
725                 CMH: 'static + Deref + Send + Sync,
726                 OMH: 'static + Deref + Send + Sync,
727                 RMH: 'static + Deref + Send + Sync,
728                 EH: 'static + EventHandler + Send,
729                 PS: 'static + Deref + Send,
730                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
731                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
732                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
733                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
734                 UMH: 'static + Deref + Send + Sync,
735                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
736                 S: 'static + Deref<Target = SC> + Send + Sync,
737                 SC: for <'b> WriteableScore<'b>,
738         >(
739                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
740                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
741         ) -> Self
742         where
743                 UL::Target: 'static + UtxoLookup,
744                 CF::Target: 'static + chain::Filter,
745                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
746                 T::Target: 'static + BroadcasterInterface,
747                 ES::Target: 'static + EntropySource,
748                 NS::Target: 'static + NodeSigner,
749                 SP::Target: 'static + SignerProvider,
750                 F::Target: 'static + FeeEstimator,
751                 R::Target: 'static + Router,
752                 L::Target: 'static + Logger,
753                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
754                 CMH::Target: 'static + ChannelMessageHandler,
755                 OMH::Target: 'static + OnionMessageHandler,
756                 RMH::Target: 'static + RoutingMessageHandler,
757                 UMH::Target: 'static + CustomMessageHandler,
758                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
759         {
760                 let stop_thread = Arc::new(AtomicBool::new(false));
761                 let stop_thread_clone = stop_thread.clone();
762                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
763                         let event_handler = |event| {
764                                 let network_graph = gossip_sync.network_graph();
765                                 if let Some(network_graph) = network_graph {
766                                         handle_network_graph_update(network_graph, &event)
767                                 }
768                                 if let Some(ref scorer) = scorer {
769                                         update_scorer(scorer, &event);
770                                 }
771                                 event_handler.handle_event(event);
772                         };
773                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
774                                 channel_manager, channel_manager.process_pending_events(&event_handler),
775                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
776                                 Sleeper::from_two_futures(
777                                         channel_manager.get_persistable_update_future(),
778                                         chain_monitor.get_update_future()
779                                 ).wait_timeout(Duration::from_millis(100)),
780                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
781                 });
782                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
783         }
784
785         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
786         /// [`ChannelManager`].
787         ///
788         /// # Panics
789         ///
790         /// This function panics if the background thread has panicked such as while persisting or
791         /// handling events.
792         ///
793         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
794         pub fn join(mut self) -> Result<(), std::io::Error> {
795                 assert!(self.thread_handle.is_some());
796                 self.join_thread()
797         }
798
799         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
800         /// [`ChannelManager`].
801         ///
802         /// # Panics
803         ///
804         /// This function panics if the background thread has panicked such as while persisting or
805         /// handling events.
806         ///
807         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
808         pub fn stop(mut self) -> Result<(), std::io::Error> {
809                 assert!(self.thread_handle.is_some());
810                 self.stop_and_join_thread()
811         }
812
813         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
814                 self.stop_thread.store(true, Ordering::Release);
815                 self.join_thread()
816         }
817
818         fn join_thread(&mut self) -> Result<(), std::io::Error> {
819                 match self.thread_handle.take() {
820                         Some(handle) => handle.join().unwrap(),
821                         None => Ok(()),
822                 }
823         }
824 }
825
826 #[cfg(feature = "std")]
827 impl Drop for BackgroundProcessor {
828         fn drop(&mut self) {
829                 self.stop_and_join_thread().unwrap();
830         }
831 }
832
833 #[cfg(all(feature = "std", test))]
834 mod tests {
835         use bitcoin::blockdata::block::BlockHeader;
836         use bitcoin::blockdata::constants::genesis_block;
837         use bitcoin::blockdata::locktime::PackedLockTime;
838         use bitcoin::blockdata::transaction::{Transaction, TxOut};
839         use bitcoin::network::constants::Network;
840         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
841         use lightning::chain::{BestBlock, Confirm, chainmonitor};
842         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
843         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
844         use lightning::chain::transaction::OutPoint;
845         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
846         use lightning::{get_event_msg, get_event};
847         use lightning::ln::PaymentHash;
848         use lightning::ln::channelmanager;
849         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
850         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
851         use lightning::ln::msgs::{ChannelMessageHandler, Init};
852         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
853         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
854         use lightning::routing::router::{DefaultRouter, Path, RouteHop};
855         use lightning::routing::scoring::{ChannelUsage, Score};
856         use lightning::util::config::UserConfig;
857         use lightning::util::ser::Writeable;
858         use lightning::util::test_utils;
859         use lightning::util::persist::KVStorePersister;
860         use lightning_persister::FilesystemPersister;
861         use std::collections::VecDeque;
862         use std::{fs, env};
863         use std::path::PathBuf;
864         use std::sync::{Arc, Mutex};
865         use std::sync::mpsc::SyncSender;
866         use std::time::Duration;
867         use bitcoin::hashes::Hash;
868         use bitcoin::TxMerkleNode;
869         use lightning_rapid_gossip_sync::RapidGossipSync;
870         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
871
872         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
873
874         #[derive(Clone, Hash, PartialEq, Eq)]
875         struct TestDescriptor{}
876         impl SocketDescriptor for TestDescriptor {
877                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
878                         0
879                 }
880
881                 fn disconnect_socket(&mut self) {}
882         }
883
884         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
885
886         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
887
888         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
889         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
890
891         struct Node {
892                 node: Arc<ChannelManager>,
893                 p2p_gossip_sync: PGS,
894                 rapid_gossip_sync: RGS,
895                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
896                 chain_monitor: Arc<ChainMonitor>,
897                 persister: Arc<FilesystemPersister>,
898                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
899                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
900                 logger: Arc<test_utils::TestLogger>,
901                 best_block: BestBlock,
902                 scorer: Arc<Mutex<TestScorer>>,
903         }
904
905         impl Node {
906                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
907                         GossipSync::P2P(self.p2p_gossip_sync.clone())
908                 }
909
910                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
911                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
912                 }
913
914                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
915                         GossipSync::None
916                 }
917         }
918
919         impl Drop for Node {
920                 fn drop(&mut self) {
921                         let data_dir = self.persister.get_data_dir();
922                         match fs::remove_dir_all(data_dir.clone()) {
923                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
924                                 _ => {}
925                         }
926                 }
927         }
928
929         struct Persister {
930                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
931                 graph_persistence_notifier: Option<SyncSender<()>>,
932                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
933                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
934                 filesystem_persister: FilesystemPersister,
935         }
936
937         impl Persister {
938                 fn new(data_dir: String) -> Self {
939                         let filesystem_persister = FilesystemPersister::new(data_dir);
940                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
941                 }
942
943                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
944                         Self { graph_error: Some((error, message)), ..self }
945                 }
946
947                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
948                         Self { graph_persistence_notifier: Some(sender), ..self }
949                 }
950
951                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
952                         Self { manager_error: Some((error, message)), ..self }
953                 }
954
955                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
956                         Self { scorer_error: Some((error, message)), ..self }
957                 }
958         }
959
960         impl KVStorePersister for Persister {
961                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
962                         if key == "manager" {
963                                 if let Some((error, message)) = self.manager_error {
964                                         return Err(std::io::Error::new(error, message))
965                                 }
966                         }
967
968                         if key == "network_graph" {
969                                 if let Some(sender) = &self.graph_persistence_notifier {
970                                         match sender.send(()) {
971                                                 Ok(()) => {},
972                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
973                                         }
974                                 };
975
976                                 if let Some((error, message)) = self.graph_error {
977                                         return Err(std::io::Error::new(error, message))
978                                 }
979                         }
980
981                         if key == "scorer" {
982                                 if let Some((error, message)) = self.scorer_error {
983                                         return Err(std::io::Error::new(error, message))
984                                 }
985                         }
986
987                         self.filesystem_persister.persist(key, object)
988                 }
989         }
990
991         struct TestScorer {
992                 event_expectations: Option<VecDeque<TestResult>>,
993         }
994
995         #[derive(Debug)]
996         enum TestResult {
997                 PaymentFailure { path: Path, short_channel_id: u64 },
998                 PaymentSuccess { path: Path },
999                 ProbeFailure { path: Path },
1000                 ProbeSuccess { path: Path },
1001         }
1002
1003         impl TestScorer {
1004                 fn new() -> Self {
1005                         Self { event_expectations: None }
1006                 }
1007
1008                 fn expect(&mut self, expectation: TestResult) {
1009                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1010                 }
1011         }
1012
1013         impl lightning::util::ser::Writeable for TestScorer {
1014                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1015         }
1016
1017         impl Score for TestScorer {
1018                 fn channel_penalty_msat(
1019                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
1020                 ) -> u64 { unimplemented!(); }
1021
1022                 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
1023                         if let Some(expectations) = &mut self.event_expectations {
1024                                 match expectations.pop_front().unwrap() {
1025                                         TestResult::PaymentFailure { path, short_channel_id } => {
1026                                                 assert_eq!(actual_path, &path);
1027                                                 assert_eq!(actual_short_channel_id, short_channel_id);
1028                                         },
1029                                         TestResult::PaymentSuccess { path } => {
1030                                                 panic!("Unexpected successful payment path: {:?}", path)
1031                                         },
1032                                         TestResult::ProbeFailure { path } => {
1033                                                 panic!("Unexpected probe failure: {:?}", path)
1034                                         },
1035                                         TestResult::ProbeSuccess { path } => {
1036                                                 panic!("Unexpected probe success: {:?}", path)
1037                                         }
1038                                 }
1039                         }
1040                 }
1041
1042                 fn payment_path_successful(&mut self, actual_path: &Path) {
1043                         if let Some(expectations) = &mut self.event_expectations {
1044                                 match expectations.pop_front().unwrap() {
1045                                         TestResult::PaymentFailure { path, .. } => {
1046                                                 panic!("Unexpected payment path failure: {:?}", path)
1047                                         },
1048                                         TestResult::PaymentSuccess { path } => {
1049                                                 assert_eq!(actual_path, &path);
1050                                         },
1051                                         TestResult::ProbeFailure { path } => {
1052                                                 panic!("Unexpected probe failure: {:?}", path)
1053                                         },
1054                                         TestResult::ProbeSuccess { path } => {
1055                                                 panic!("Unexpected probe success: {:?}", path)
1056                                         }
1057                                 }
1058                         }
1059                 }
1060
1061                 fn probe_failed(&mut self, actual_path: &Path, _: u64) {
1062                         if let Some(expectations) = &mut self.event_expectations {
1063                                 match expectations.pop_front().unwrap() {
1064                                         TestResult::PaymentFailure { path, .. } => {
1065                                                 panic!("Unexpected payment path failure: {:?}", path)
1066                                         },
1067                                         TestResult::PaymentSuccess { path } => {
1068                                                 panic!("Unexpected payment path success: {:?}", path)
1069                                         },
1070                                         TestResult::ProbeFailure { path } => {
1071                                                 assert_eq!(actual_path, &path);
1072                                         },
1073                                         TestResult::ProbeSuccess { path } => {
1074                                                 panic!("Unexpected probe success: {:?}", path)
1075                                         }
1076                                 }
1077                         }
1078                 }
1079                 fn probe_successful(&mut self, actual_path: &Path) {
1080                         if let Some(expectations) = &mut self.event_expectations {
1081                                 match expectations.pop_front().unwrap() {
1082                                         TestResult::PaymentFailure { path, .. } => {
1083                                                 panic!("Unexpected payment path failure: {:?}", path)
1084                                         },
1085                                         TestResult::PaymentSuccess { path } => {
1086                                                 panic!("Unexpected payment path success: {:?}", path)
1087                                         },
1088                                         TestResult::ProbeFailure { path } => {
1089                                                 panic!("Unexpected probe failure: {:?}", path)
1090                                         },
1091                                         TestResult::ProbeSuccess { path } => {
1092                                                 assert_eq!(actual_path, &path);
1093                                         }
1094                                 }
1095                         }
1096                 }
1097         }
1098
1099         impl Drop for TestScorer {
1100                 fn drop(&mut self) {
1101                         if std::thread::panicking() {
1102                                 return;
1103                         }
1104
1105                         if let Some(event_expectations) = &self.event_expectations {
1106                                 if !event_expectations.is_empty() {
1107                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1108                                 }
1109                         }
1110                 }
1111         }
1112
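	// Joins a directory path and a file name into a single path string, e.g.
	// `get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string())` is the
	// file the background processor persists the ChannelManager to in `test_background_processor`.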
1113         fn get_full_filepath(filepath: String, filename: String) -> String {
1114                 let mut path = PathBuf::from(filepath);
1115                 path.push(filename);
1116                 path.to_str().unwrap().to_string()
1117         }
1118
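	// Builds `num_nodes` fully wired `Node`s (channel manager, chain monitor, peer manager, gossip
	// syncs, filesystem persister, logger and scorer), each persisting under its own directory
	// inside a temp dir derived from `persist_dir`, and connects every pair of nodes as peers.
	// Returns the temp dir prefix along with the nodes, e.g.
	// `let (persist_dir, nodes) = create_nodes(2, "test_background_processor");`.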
1119         fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
1120                 let persist_temp_path = env::temp_dir().join(persist_dir);
1121                 let persist_dir = persist_temp_path.to_string_lossy().to_string();
1122                 let network = Network::Testnet;
1123                 let mut nodes = Vec::new();
1124                 for i in 0..num_nodes {
1125                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1126                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1127                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1128                         let genesis_block = genesis_block(network);
1129                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1130                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1131                         let seed = [i as u8; 32];
1132                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1133                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1134                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
1135                         let now = Duration::from_secs(genesis_block.header.time as u64);
1136                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1137                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1138                         let best_block = BestBlock::from_network(network);
1139                         let params = ChainParameters { network, best_block };
1140                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1141                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1142                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1143                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1144                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1145                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1146                         nodes.push(node);
1147                 }
1148
1149                 for i in 0..num_nodes {
1150                         for j in (i+1)..num_nodes {
1151                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1152                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1153                         }
1154                 }
1155
1156                 (persist_dir, nodes)
1157         }
1158
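	// Drives a complete channel-open handshake between `$node_a` and `$node_b` for `$channel_value`
	// satoshis, up to the point where both nodes have seen `Event::ChannelPending`, and returns the
	// funding transaction so the caller can confirm it on-chain afterwards.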
1159         macro_rules! open_channel {
1160                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1161                         begin_open_channel!($node_a, $node_b, $channel_value);
1162                         let events = $node_a.node.get_and_clear_pending_events();
1163                         assert_eq!(events.len(), 1);
1164                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1165                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1166                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1167                         get_event!($node_b, Event::ChannelPending);
1168                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1169                         get_event!($node_a, Event::ChannelPending);
1170                         tx
1171                 }}
1172         }
1173
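	// Performs only the `open_channel`/`accept_channel` message exchange, leaving `$node_a` with a
	// pending `Event::FundingGenerationReady` for the caller (or a background event handler) to act
	// on.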
1174         macro_rules! begin_open_channel {
1175                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1176                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1177                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1178                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1179                 }}
1180         }
1181
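	// Given a `FundingGenerationReady` event, asserts the requested channel value and user channel
	// id, then builds a single-output funding transaction paying the requested script. Returns the
	// temporary channel id and the transaction.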
1182         macro_rules! handle_funding_generation_ready {
1183                 ($event: expr, $channel_value: expr) => {{
1184                         match $event {
1185                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1186                                         assert_eq!(channel_value_satoshis, $channel_value);
1187                                         assert_eq!(user_channel_id, 42);
1188
1189					let tx = Transaction { version: 1, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1190                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1191                                         }]};
1192                                         (temporary_channel_id, tx)
1193                                 },
1194                                 _ => panic!("Unexpected event"),
1195                         }
1196                 }}
1197         }
1198
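	// Connects `depth` blocks on top of the node's current best block: the transaction is reported
	// as confirmed in the first block and a best-block update is reported for the last one, so
	// confirming to `ANTI_REORG_DELAY` (as `confirm_transaction` below does) is enough for the
	// channel to be considered fully confirmed.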
1199         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1200                 for i in 1..=depth {
1201                         let prev_blockhash = node.best_block.block_hash();
1202                         let height = node.best_block.height() + 1;
1203                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1204                         let txdata = vec![(0, tx)];
1205                         node.best_block = BestBlock::new(header.block_hash(), height);
1206                         match i {
1207                                 1 => {
1208                                         node.node.transactions_confirmed(&header, &txdata, height);
1209                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1210                                 },
1211                                 x if x == depth => {
1212                                         node.node.best_block_updated(&header, height);
1213                                         node.chain_monitor.best_block_updated(&header, height);
1214                                 },
1215                                 _ => {},
1216                         }
1217                 }
1218         }
1219         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1220                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1221         }
1222
1223         #[test]
1224         fn test_background_processor() {
1225		// Test that when a new channel is created the ChannelManager signals that it needs
1226		// re-persistence and the background processor writes it to disk. Also test that the same
1227		// happens for later updates, such as the force-close performed below.
1228                 let (persist_dir, nodes) = create_nodes(2, "test_background_processor");
1229
1230                 // Go through the channel creation process so that each node has something to persist. Since
1231                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1232                 // avoid a race with processing events.
1233                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1234
1235                 // Initiate the background processors to watch each node.
1236                 let data_dir = nodes[0].persister.get_data_dir();
1237                 let persister = Arc::new(Persister::new(data_dir));
1238                 let event_handler = |_: _| {};
1239                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1240
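		// The background processor persists asynchronously, so check_persisted_data! re-serializes
		// `$node` and spins until the file at `$filepath` contains exactly those bytes.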
1241                 macro_rules! check_persisted_data {
1242                         ($node: expr, $filepath: expr) => {
1243                                 let mut expected_bytes = Vec::new();
1244                                 loop {
1245                                         expected_bytes.clear();
1246                                         match $node.write(&mut expected_bytes) {
1247                                                 Ok(()) => {
1248                                                         match std::fs::read($filepath) {
1249                                                                 Ok(bytes) => {
1250                                                                         if bytes == expected_bytes {
1251                                                                                 break
1252                                                                         } else {
1253                                                                                 continue
1254                                                                         }
1255                                                                 },
1256                                                                 Err(_) => continue
1257                                                         }
1258                                                 },
1259                                                 Err(e) => panic!("Unexpected error: {}", e)
1260                                         }
1261                                 }
1262                         }
1263                 }
1264
1265                 // Check that the initial channel manager data is persisted as expected.
1266                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
1267                 check_persisted_data!(nodes[0].node, filepath.clone());
1268
1269                 loop {
1270                         if !nodes[0].node.get_persistence_condvar_value() { break }
1271                 }
1272
1273                 // Force-close the channel.
1274                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1275
1276                 // Check that the force-close updates are persisted.
1277                 check_persisted_data!(nodes[0].node, filepath.clone());
1278                 loop {
1279                         if !nodes[0].node.get_persistence_condvar_value() { break }
1280                 }
1281
1282                 // Check network graph is persisted
1283                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
1284                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1285
1286                 // Check scorer is persisted
1287                 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
1288                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1289
1290                 if !std::thread::panicking() {
1291                         bg_processor.stop().unwrap();
1292                 }
1293         }
1294
1295         #[test]
1296         fn test_timer_tick_called() {
1297                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1298                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
1299                 // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
1300                 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1301                 let data_dir = nodes[0].persister.get_data_dir();
1302                 let persister = Arc::new(Persister::new(data_dir));
1303                 let event_handler = |_: _| {};
1304                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1305                 loop {
1306                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1307                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1308                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1309                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1310                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1311                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1312                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1313                                 break
1314                         }
1315                 }
1316
1317                 if !std::thread::panicking() {
1318                         bg_processor.stop().unwrap();
1319                 }
1320         }
1321
1322         #[test]
1323         fn test_channel_manager_persist_error() {
1324		// Test that if we encounter an error during manager persistence, `join` returns that error.
1325                 let (_, nodes) = create_nodes(2, "test_persist_error");
1326                 open_channel!(nodes[0], nodes[1], 100000);
1327
1328                 let data_dir = nodes[0].persister.get_data_dir();
1329                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1330                 let event_handler = |_: _| {};
1331                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1332                 match bg_processor.join() {
1333                         Ok(_) => panic!("Expected error persisting manager"),
1334                         Err(e) => {
1335                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1336                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1337                         },
1338                 }
1339         }
1340
1341         #[tokio::test]
1342         #[cfg(feature = "futures")]
1343         async fn test_channel_manager_persist_error_async() {
1344		// Test that if we encounter an error during manager persistence, the background future resolves to that error.
1345                 let (_, nodes) = create_nodes(2, "test_persist_error_sync");
1346                 open_channel!(nodes[0], nodes[1], 100000);
1347
1348                 let data_dir = nodes[0].persister.get_data_dir();
1349                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1350
1351                 let bp_future = super::process_events_async(
1352                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1353                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1354                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1355                                 Box::pin(async move {
1356                                         tokio::time::sleep(dur).await;
1357                                         false // Never exit
1358                                 })
1359                         }, false,
1360                 );
1361                 match bp_future.await {
1362                         Ok(_) => panic!("Expected error persisting manager"),
1363                         Err(e) => {
1364                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1365                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1366                         },
1367                 }
1368         }
1369
1370         #[test]
1371         fn test_network_graph_persist_error() {
1372                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1373                 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1374                 let data_dir = nodes[0].persister.get_data_dir();
1375                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1376                 let event_handler = |_: _| {};
1377                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1378
1379                 match bg_processor.stop() {
1380                         Ok(_) => panic!("Expected error persisting network graph"),
1381                         Err(e) => {
1382                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1383                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1384                         },
1385                 }
1386         }
1387
1388         #[test]
1389         fn test_scorer_persist_error() {
1390                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1391                 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1392                 let data_dir = nodes[0].persister.get_data_dir();
1393                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1394                 let event_handler = |_: _| {};
1395		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1396
1397                 match bg_processor.stop() {
1398                         Ok(_) => panic!("Expected error persisting scorer"),
1399                         Err(e) => {
1400                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1401                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1402                         },
1403                 }
1404         }
1405
1406         #[test]
1407         fn test_background_event_handling() {
1408                 let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
1409                 let channel_value = 100000;
1410                 let data_dir = nodes[0].persister.get_data_dir();
1411                 let persister = Arc::new(Persister::new(data_dir.clone()));
1412
1413		// Set up a background event handler for FundingGenerationReady and ChannelPending events.
1414                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1415                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1416                 let event_handler = move |event: Event| match event {
1417                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1418                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1419                         Event::ChannelReady { .. } => {},
1420                         _ => panic!("Unexpected event: {:?}", event),
1421                 };
1422
1423                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1424
1425                 // Open a channel and check that the FundingGenerationReady event was handled.
1426                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1427                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1428                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1429                         .expect("FundingGenerationReady not handled within deadline");
1430                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1431                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1432                 get_event!(nodes[1], Event::ChannelPending);
1433                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1434                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1435                         .expect("ChannelPending not handled within deadline");
1436
1437                 // Confirm the funding transaction.
1438                 confirm_transaction(&mut nodes[0], &funding_tx);
1439                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1440                 confirm_transaction(&mut nodes[1], &funding_tx);
1441                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1442                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1443                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1444                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1445                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1446
1447                 if !std::thread::panicking() {
1448                         bg_processor.stop().unwrap();
1449                 }
1450
1451                 // Set up a background event handler for SpendableOutputs events.
1452                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1453                 let event_handler = move |event: Event| match event {
1454                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1455                         Event::ChannelReady { .. } => {},
1456                         Event::ChannelClosed { .. } => {},
1457                         _ => panic!("Unexpected event: {:?}", event),
1458                 };
1459                 let persister = Arc::new(Persister::new(data_dir));
1460                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1461
1462                 // Force close the channel and check that the SpendableOutputs event was handled.
1463                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1464                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1465                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1466
1467                 let event = receiver
1468                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1469                         .expect("Events not handled within deadline");
1470                 match event {
1471                         Event::SpendableOutputs { .. } => {},
1472                         _ => panic!("Unexpected event: {:?}", event),
1473                 }
1474
1475                 if !std::thread::panicking() {
1476                         bg_processor.stop().unwrap();
1477                 }
1478         }
1479
1480         #[test]
1481         fn test_scorer_persistence() {
1482                 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1483                 let data_dir = nodes[0].persister.get_data_dir();
1484                 let persister = Arc::new(Persister::new(data_dir));
1485                 let event_handler = |_: _| {};
1486                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1487
1488                 loop {
1489                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1490                         let expected_log = "Persisting scorer".to_string();
1491                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1492                                 break
1493                         }
1494                 }
1495
1496                 if !std::thread::panicking() {
1497                         bg_processor.stop().unwrap();
1498                 }
1499         }
1500
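	// Shared body of the sync and async "don't prune until gossip sync completes" tests: seed the
	// graph with a partial channel announcement, wait for the processor loop to run at least twice,
	// feed in a rapid gossip sync update, and only then expect the graph to be pruned (signalled
	// through `$receive`, which is wired to the persister's graph-persistence notifier).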
1501         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1502                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1503                         let features = ChannelFeatures::empty();
1504                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1505                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1506                         ).expect("Failed to update channel from partial announcement");
1507                         let original_graph_description = $nodes[0].network_graph.to_string();
1508                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1509                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1510
1511                         loop {
1512                                 $sleep;
1513                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1514                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1515                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1516                                         .unwrap_or(&0) > 1
1517                                 {
1518                                         // Wait until the loop has gone around at least twice.
1519                                         break
1520                                 }
1521                         }
1522
1523                         let initialization_input = vec![
1524                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1525                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1526                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1527                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1528                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1529                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1530                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1531                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1532                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1533                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1534                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1535                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1536                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1537                         ];
1538                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1539
1540			// This should have added two channels and pruned the previous one.
1541                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1542
1543                         $receive.expect("Network graph not pruned within deadline");
1544
1545			// All channels should now be pruned.
1546                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1547                 }
1548         }
1549
1550         #[test]
1551         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1552                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1553
1554                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1555                 let data_dir = nodes[0].persister.get_data_dir();
1556                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1557
1558                 let event_handler = |_: _| {};
1559                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1560
1561                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1562                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1563                         std::thread::sleep(Duration::from_millis(1)));
1564
1565                 background_processor.stop().unwrap();
1566         }
1567
1568         #[tokio::test]
1569         #[cfg(feature = "futures")]
1570         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1571                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1572
1573                 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
1574                 let data_dir = nodes[0].persister.get_data_dir();
1575                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1576
1577                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1578                 let bp_future = super::process_events_async(
1579                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1580                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1581                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1582                                 let mut exit_receiver = exit_receiver.clone();
1583                                 Box::pin(async move {
1584                                         tokio::select! {
1585                                                 _ = tokio::time::sleep(dur) => false,
1586                                                 _ = exit_receiver.changed() => true,
1587                                         }
1588                                 })
1589                         }, false,
1590                 );
1591
1592                 let t1 = tokio::spawn(bp_future);
1593                 let t2 = tokio::spawn(async move {
1594                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1595                                 let mut i = 0;
1596                                 loop {
1597                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1598                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1599                                         assert!(i < 5);
1600                                         i += 1;
1601                                 }
1602                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1603                         exit_sender.send(()).unwrap();
1604                 });
1605                 let (r1, r2) = tokio::join!(t1, t2);
1606                 r1.unwrap().unwrap();
1607                 r2.unwrap()
1608         }
1609
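	// Shared body of the sync and async payment-path-scoring tests: pushes synthetic payment and
	// probe events and uses `TestScorer` expectations to check that each one reaches the scorer.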
1610         macro_rules! do_test_payment_path_scoring {
1611                 ($nodes: expr, $receive: expr) => {
1612			// Ensure that the scorer is updated when relevant payment and probe events are processed.
1613			// Rather than sending real payments, the events are pushed directly onto the
1614			// ChannelManager's pending event queue. The caller must have a running background
1615			// processor whose event handler forwards each handled event to `$receive`, so the
1616			// assertions below can wait on them.
1617                         let scored_scid = 4242;
1618                         let secp_ctx = Secp256k1::new();
1619                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1620                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1621
1622                         let path = Path { hops: vec![RouteHop {
1623                                 pubkey: node_1_id,
1624                                 node_features: NodeFeatures::empty(),
1625                                 short_channel_id: scored_scid,
1626                                 channel_features: ChannelFeatures::empty(),
1627                                 fee_msat: 0,
1628                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1629                         }], blinded_tail: None };
1630
1631                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1632                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1633                                 payment_id: None,
1634                                 payment_hash: PaymentHash([42; 32]),
1635                                 payment_failed_permanently: false,
1636                                 failure: PathFailure::OnPath { network_update: None },
1637                                 path: path.clone(),
1638                                 short_channel_id: Some(scored_scid),
1639                         });
1640                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1641                         match event {
1642                                 Event::PaymentPathFailed { .. } => {},
1643                                 _ => panic!("Unexpected event"),
1644                         }
1645
1646			// Ensure that payments which were explicitly failed back by the destination are scored
1647			// as ProbeSuccess, since such a failure means the path did reach its destination.
1648                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1649                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1650                                 payment_id: None,
1651                                 payment_hash: PaymentHash([42; 32]),
1652                                 payment_failed_permanently: true,
1653                                 failure: PathFailure::OnPath { network_update: None },
1654                                 path: path.clone(),
1655                                 short_channel_id: None,
1656                         });
1657                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1658                         match event {
1659                                 Event::PaymentPathFailed { .. } => {},
1660                                 _ => panic!("Unexpected event"),
1661                         }
1662
1663                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1664                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1665                                 payment_id: PaymentId([42; 32]),
1666                                 payment_hash: None,
1667                                 path: path.clone(),
1668                         });
1669                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1670                         match event {
1671                                 Event::PaymentPathSuccessful { .. } => {},
1672                                 _ => panic!("Unexpected event"),
1673                         }
1674
1675                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1676                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1677                                 payment_id: PaymentId([42; 32]),
1678                                 payment_hash: PaymentHash([42; 32]),
1679                                 path: path.clone(),
1680                         });
1681                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1682                         match event {
1683				Event::ProbeSuccessful { .. } => {},
1684                                 _ => panic!("Unexpected event"),
1685                         }
1686
1687                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1688                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1689                                 payment_id: PaymentId([42; 32]),
1690                                 payment_hash: PaymentHash([42; 32]),
1691                                 path,
1692                                 short_channel_id: Some(scored_scid),
1693                         });
1694                         let event = $receive.expect("ProbeFailure not handled within deadline");
1695                         match event {
1696                                 Event::ProbeFailed { .. } => {},
1697                                 _ => panic!("Unexpected event"),
1698                         }
1699                 }
1700         }
1701
1702         #[test]
1703         fn test_payment_path_scoring() {
1704                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1705                 let event_handler = move |event: Event| match event {
1706                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1707                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1708                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1709                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1710                         _ => panic!("Unexpected event: {:?}", event),
1711                 };
1712
1713                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1714                 let data_dir = nodes[0].persister.get_data_dir();
1715                 let persister = Arc::new(Persister::new(data_dir));
1716                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1717
1718                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1719
1720                 if !std::thread::panicking() {
1721                         bg_processor.stop().unwrap();
1722                 }
1723         }
1724
1725         #[tokio::test]
1726         #[cfg(feature = "futures")]
1727         async fn test_payment_path_scoring_async() {
1728                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1729                 let event_handler = move |event: Event| {
1730                         let sender_ref = sender.clone();
1731                         async move {
1732                                 match event {
1733                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1734                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1735                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1736                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1737                                         _ => panic!("Unexpected event: {:?}", event),
1738                                 }
1739                         }
1740                 };
1741
1742                 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1743                 let data_dir = nodes[0].persister.get_data_dir();
1744                 let persister = Arc::new(Persister::new(data_dir));
1745
1746                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1747
1748                 let bp_future = super::process_events_async(
1749                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1750                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1751                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1752                                 let mut exit_receiver = exit_receiver.clone();
1753                                 Box::pin(async move {
1754                                         tokio::select! {
1755                                                 _ = tokio::time::sleep(dur) => false,
1756                                                 _ = exit_receiver.changed() => true,
1757                                         }
1758                                 })
1759                         }, false,
1760                 );
1761                 let t1 = tokio::spawn(bp_future);
1762                 let t2 = tokio::spawn(async move {
1763                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1764                         exit_sender.send(()).unwrap();
1765                 });
1766
1767                 let (r1, r2) = tokio::join!(t1, t2);
1768                 r1.unwrap().unwrap();
1769                 r2.unwrap()
1770         }
1771 }