Add Tokio example to `process_events_async` docs
[rust-lightning] / lightning-background-processor / src / lib.rs
1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
4
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
8
9 #![deny(missing_docs)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15
16 #[cfg(any(test, feature = "std"))]
17 extern crate core;
18
19 #[cfg(not(feature = "std"))]
20 extern crate alloc;
21
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
24
25 use lightning::chain;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::chain::keysinterface::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::{ChannelMessageHandler, OnionMessageHandler, RoutingMessageHandler};
34 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{Score, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
44
45 use core::ops::Deref;
46 use core::time::Duration;
47
48 #[cfg(feature = "std")]
49 use std::sync::Arc;
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
56
57 #[cfg(not(feature = "std"))]
58 use alloc::vec::Vec;
59
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 ///   writing it to disk/backups by invoking the callback given to it at startup.
66 ///   [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
68 ///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 ///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
71 ///
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be
73 /// relied upon, as doing so may result in high latency.
74 ///
75 /// # Note
76 ///
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
81 ///
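/// # Example
///
/// A minimal, hedged sketch of typical usage, assuming the required components (persister,
/// event handler, chain monitor, channel manager, gossip sync, peer manager, logger and
/// scorer) have already been constructed; see [`BackgroundProcessor::start`] for the exact
/// generic bounds:
///
/// ```ignore
/// let background_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager,
///     GossipSync::p2p(gossip_sync), peer_manager, logger, Some(scorer),
/// );
/// // ... run the node ...
/// // On shutdown, stop the background thread and surface any persistence error.
/// background_processor.stop().expect("background processing exited with an error");
/// ```
///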
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 #[cfg(feature = "std")]
85 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
86 pub struct BackgroundProcessor {
87         stop_thread: Arc<AtomicBool>,
88         thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
89 }
90
91 #[cfg(not(test))]
92 const FRESHNESS_TIMER: u64 = 60;
93 #[cfg(test)]
94 const FRESHNESS_TIMER: u64 = 1;
95
96 #[cfg(all(not(test), not(debug_assertions)))]
97 const PING_TIMER: u64 = 10;
98 /// Signature operations take much longer without compiler optimisations.
99 /// Increasing the ping timer accommodates this, but slower devices will be disconnected if
100 /// the timeout is reached.
101 #[cfg(all(not(test), debug_assertions))]
102 const PING_TIMER: u64 = 30;
103 #[cfg(test)]
104 const PING_TIMER: u64 = 1;
105
106 /// Prune the network graph of stale entries hourly.
107 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
108
109 #[cfg(not(test))]
110 const SCORER_PERSIST_TIMER: u64 = 30;
111 #[cfg(test)]
112 const SCORER_PERSIST_TIMER: u64 = 1;
113
114 #[cfg(not(test))]
115 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
116 #[cfg(test)]
117 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
118
119 #[cfg(not(test))]
120 const REBROADCAST_TIMER: u64 = 30;
121 #[cfg(test)]
122 const REBROADCAST_TIMER: u64 = 1;
123
124 #[cfg(feature = "futures")]
125 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
126 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
127 #[cfg(feature = "futures")]
128 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
129         min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
130
131 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
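///
/// A hedged sketch of constructing each variant via the helper constructors defined below,
/// assuming `p2p_gossip_sync` and `rapid_gossip_sync` are `Arc`-wrapped sync objects already
/// in scope:
///
/// ```ignore
/// // Gossip sync over the lightning peer-to-peer network:
/// let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_gossip_sync));
/// // Or rapid gossip sync from a trusted server:
/// let gossip_sync = GossipSync::rapid(Arc::clone(&rapid_gossip_sync));
/// // Or no gossip sync at all:
/// let gossip_sync = GossipSync::none();
/// ```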
132 pub enum GossipSync<
133         P: Deref<Target = P2PGossipSync<G, U, L>>,
134         R: Deref<Target = RapidGossipSync<G, L>>,
135         G: Deref<Target = NetworkGraph<L>>,
136         U: Deref,
137         L: Deref,
138 >
139 where U::Target: UtxoLookup, L::Target: Logger {
140         /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
141         P2P(P),
142         /// Rapid gossip sync from a trusted server.
143         Rapid(R),
144         /// No gossip sync.
145         None,
146 }
147
148 impl<
149         P: Deref<Target = P2PGossipSync<G, U, L>>,
150         R: Deref<Target = RapidGossipSync<G, L>>,
151         G: Deref<Target = NetworkGraph<L>>,
152         U: Deref,
153         L: Deref,
154 > GossipSync<P, R, G, U, L>
155 where U::Target: UtxoLookup, L::Target: Logger {
156         fn network_graph(&self) -> Option<&G> {
157                 match self {
158                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
159                         GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
160                         GossipSync::None => None,
161                 }
162         }
163
164         fn prunable_network_graph(&self) -> Option<&G> {
165                 match self {
166                         GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
167                         GossipSync::Rapid(gossip_sync) => {
168                                 if gossip_sync.is_initial_sync_complete() {
169                                         Some(gossip_sync.network_graph())
170                                 } else {
171                                         None
172                                 }
173                         },
174                         GossipSync::None => None,
175                 }
176         }
177 }
178
179 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
180 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
181         GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
182 where
183         U::Target: UtxoLookup,
184         L::Target: Logger,
185 {
186         /// Initializes a new [`GossipSync::P2P`] variant.
187         pub fn p2p(gossip_sync: P) -> Self {
188                 GossipSync::P2P(gossip_sync)
189         }
190 }
191
192 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
193 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
194         GossipSync<
195                 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
196                 R,
197                 G,
198                 &'a (dyn UtxoLookup + Send + Sync),
199                 L,
200         >
201 where
202         L::Target: Logger,
203 {
204         /// Initializes a new [`GossipSync::Rapid`] variant.
205         pub fn rapid(gossip_sync: R) -> Self {
206                 GossipSync::Rapid(gossip_sync)
207         }
208 }
209
210 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
211 impl<'a, L: Deref>
212         GossipSync<
213                 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
214                 &RapidGossipSync<&'a NetworkGraph<L>, L>,
215                 &'a NetworkGraph<L>,
216                 &'a (dyn UtxoLookup + Send + Sync),
217                 L,
218         >
219 where
220         L::Target: Logger,
221 {
222         /// Initializes a new [`GossipSync::None`] variant.
223         pub fn none() -> Self {
224                 GossipSync::None
225         }
226 }
227
228 fn handle_network_graph_update<L: Deref>(
229         network_graph: &NetworkGraph<L>, event: &Event
230 ) where L::Target: Logger {
231         if let Event::PaymentPathFailed {
232                 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
233         {
234                 network_graph.handle_network_update(upd);
235         }
236 }
237
238 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
239         scorer: &'a S, event: &Event
240 ) {
241         let mut score = scorer.lock();
242         match event {
243                 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
244                         let path = path.iter().collect::<Vec<_>>();
245                         score.payment_path_failed(&path, *scid);
246                 },
247                 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
248                         // Reached if the destination explicitly failed it back. We treat this as a successful probe
249                         // because the payment made it all the way to the destination with sufficient liquidity.
250                         let path = path.iter().collect::<Vec<_>>();
251                         score.probe_successful(&path);
252                 },
253                 Event::PaymentPathSuccessful { path, .. } => {
254                         let path = path.iter().collect::<Vec<_>>();
255                         score.payment_path_successful(&path);
256                 },
257                 Event::ProbeSuccessful { path, .. } => {
258                         let path = path.iter().collect::<Vec<_>>();
259                         score.probe_successful(&path);
260                 },
261                 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
262                         let path = path.iter().collect::<Vec<_>>();
263                         score.probe_failed(&path, *scid);
264                 },
265                 _ => {},
266         }
267 }
268
269 macro_rules! define_run_body {
270         ($persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
271          $channel_manager: ident, $process_channel_manager_events: expr,
272          $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
273          $loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
274          $check_slow_await: expr)
275         => { {
276                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
277                 $channel_manager.timer_tick_occurred();
278                 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
279                 $chain_monitor.rebroadcast_pending_claims();
280
281                 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
282                 let mut last_ping_call = $get_timer(PING_TIMER);
283                 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
284                 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
285                 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
286                 let mut have_pruned = false;
287
288                 loop {
289                         $process_channel_manager_events;
290                         $process_chain_monitor_events;
291
292                         // Note that the PeerManager::process_events may block on ChannelManager's locks,
293                         // hence it comes last here. When the ChannelManager finishes whatever it's doing,
294                         // we want to ensure we get into `persist_manager` as quickly as we can, especially
295                         // without running the normal event processing above and handing events to users.
296                         //
297                         // Specifically, on an *extremely* slow machine, we may see ChannelManager start
298                         // processing a message effectively at any point during this loop. In order to
299                         // minimize the time between such processing completing and persisting the updated
300                         // ChannelManager, we want to minimize methods blocking on a ChannelManager
301                         // generally, and as a fallback place such blocking only immediately before
302                         // persistence.
303                         $peer_manager.process_events();
304
305                         // We wait up to 100ms, but track how long it takes to detect being put to sleep;
306                         // see `await_start`'s use below.
307                         let mut await_start = None;
308                         if $check_slow_await { await_start = Some($get_timer(1)); }
309                         let updates_available = $await;
310                         let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
311
312                         if updates_available {
313                                 log_trace!($logger, "Persisting ChannelManager...");
314                                 $persister.persist_manager(&*$channel_manager)?;
315                                 log_trace!($logger, "Done persisting ChannelManager.");
316                         }
317                         // Exit the loop if the background processor was requested to stop.
318                         if $loop_exit_check {
319                                 log_trace!($logger, "Terminating background processor.");
320                                 break;
321                         }
322                         if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
323                                 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
324                                 $channel_manager.timer_tick_occurred();
325                                 last_freshness_call = $get_timer(FRESHNESS_TIMER);
326                         }
327                         if await_slow {
328                                 // On various platforms, we may be starved of CPU cycles for several reasons.
329                                 // E.g. on iOS, if we've been in the background, we will be entirely paused.
330                                 // Similarly, if we're on a desktop platform and the device has been asleep, we
331                                 // may not get any cycles.
332                                 // We detect this by checking whether our max-100ms sleep, above, ran longer than a
333                                 // full second, at which point we assume sockets may have been killed (they
334                                 // appear to be, at least on some platforms, even if the pause was only a second).
335                                 // Note that we have to take care to not get here just because user event
336                                 // processing was slow at the top of the loop. For example, the sample client
337                                 // may call Bitcoin Core RPCs during event handling, which very often takes
338                                 // more than a handful of seconds to complete, and shouldn't disconnect all our
339                                 // peers.
340                                 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
341                                 $peer_manager.disconnect_all_peers();
342                                 last_ping_call = $get_timer(PING_TIMER);
343                         } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
344                                 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
345                                 $peer_manager.timer_tick_occurred();
346                                 last_ping_call = $get_timer(PING_TIMER);
347                         }
348
349                         // Note that we want to run a graph prune once not long after startup, before
350                         // falling back to our usual hourly prunes; this avoids short-lived clients never
351                         // pruning their network graph. Concretely, we run the first prune 60 seconds after
352                         // startup and then continue on our normal cadence.
353                         let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
354                         if $timer_elapsed(&mut last_prune_call, prune_timer) {
355                                 // The network graph must not be pruned while rapid sync completion is pending
356                                 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
357                                         #[cfg(feature = "std")] {
358                                                 log_trace!($logger, "Pruning and persisting network graph.");
359                                                 network_graph.remove_stale_channels_and_tracking();
360                                         }
361                                         #[cfg(not(feature = "std"))] {
362                                                 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
363                                                 log_trace!($logger, "Persisting network graph.");
364                                         }
365
366                                         if let Err(e) = $persister.persist_graph(network_graph) {
367                                                 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
368                                         }
369
370                                         have_pruned = true;
371                                 }
372                                 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
373                                 last_prune_call = $get_timer(prune_timer);
374                         }
375
376                         if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
377                                 if let Some(ref scorer) = $scorer {
378                                         log_trace!($logger, "Persisting scorer");
379                                         if let Err(e) = $persister.persist_scorer(&scorer) {
380                                                 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
381                                         }
382                                 }
383                                 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
384                         }
385
386                         if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
387                                 log_trace!($logger, "Rebroadcasting monitor's pending claims");
388                                 $chain_monitor.rebroadcast_pending_claims();
389                                 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
390                         }
391                 }
392
393                 // After we exit, ensure we persist the ChannelManager one final time - this avoids
394                 // some races where users quit while channel updates were in-flight, with
395                 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
396                 $persister.persist_manager(&*$channel_manager)?;
397
398                 // Persist Scorer on exit
399                 if let Some(ref scorer) = $scorer {
400                         $persister.persist_scorer(&scorer)?;
401                 }
402
403                 // Persist NetworkGraph on exit
404                 if let Some(network_graph) = $gossip_sync.network_graph() {
405                         $persister.persist_graph(network_graph)?;
406                 }
407
408                 Ok(())
409         } }
410 }
411
412 #[cfg(feature = "futures")]
413 pub(crate) mod futures_util {
414         use core::future::Future;
415         use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
416         use core::pin::Pin;
417         use core::marker::Unpin;
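        /// A minimal hand-rolled "select" future: completes as soon as any of the three contained
        /// futures completes. In `process_events_async` below, `a` is the `ChannelManager`
        /// persistence future, `b` is the `ChainMonitor` update future and `c` is the
        /// user-provided sleep future.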
418         pub(crate) struct Selector<
419                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
420         > {
421                 pub a: A,
422                 pub b: B,
423                 pub c: C,
424         }
425         pub(crate) enum SelectorOutput {
426                 A, B, C(bool),
427         }
428
429         impl<
430                 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
431         > Future for Selector<A, B, C> {
432                 type Output = SelectorOutput;
433                 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
434                         match Pin::new(&mut self.a).poll(ctx) {
435                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
436                                 Poll::Pending => {},
437                         }
438                         match Pin::new(&mut self.b).poll(ctx) {
439                                 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
440                                 Poll::Pending => {},
441                         }
442                         match Pin::new(&mut self.c).poll(ctx) {
443                                 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
444                                 Poll::Pending => {},
445                         }
446                         Poll::Pending
447                 }
448         }
449
450         // If we want to poll a future outside an async context to figure out whether it has
451         // completed, without awaiting it, we need a Waker, which needs a vtable. We fill it with
452         // dummy values, but sadly there's a good bit of boilerplate here.
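        //
        // A hedged sketch of how this is used in `process_events_async` below: poll a sleep
        // future (here named `sleep_fut` for illustration) once with the dummy waker to check
        // whether it has already completed, without blocking on it:
        //
        //     let waker = dummy_waker();
        //     let mut ctx = core::task::Context::from_waker(&waker);
        //     let finished = core::pin::Pin::new(&mut sleep_fut).poll(&mut ctx).is_ready();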
453         fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
454         fn dummy_waker_action(_: *const ()) { }
455
456         const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
457                 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
458         pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
459 }
460 #[cfg(feature = "futures")]
461 use futures_util::{Selector, SelectorOutput, dummy_waker};
462 #[cfg(feature = "futures")]
463 use core::task;
464
465 /// Processes background events in a future.
466 ///
467 /// `sleeper` should return a future which completes in the given amount of time and returns a
468 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
469 /// future which outputs `true`, the loop will exit and this function's future will complete.
470 /// The `sleeper` future is free to return early after it has triggered the exit condition.
471 ///
472 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
473 ///
474 /// Requires the `futures` feature. Note that while this method is available without the `std`
475 /// feature, running it without `std` skips calling
476 /// [`NetworkGraph::remove_stale_channels_and_tracking`]; in that case you should instead call
477 /// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] yourself at regular intervals.
478 ///
479 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
480 /// mobile device, where we may need to check for interruption of the application regularly. If you
481 /// are unsure, you should set the flag, as its performance impact is minimal unless there
482 /// are hundreds or thousands of simultaneous calls to this function running.
483 ///
484 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
485 /// could setup `process_events_async` like this:
486 /// ```
487 /// # struct MyPersister {}
488 /// # impl lightning::util::persist::KVStorePersister for MyPersister {
489 /// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
490 /// # }
491 /// # struct MyEventHandler {}
492 /// # impl MyEventHandler {
493 /// #     async fn handle_event(&self, _: lightning::events::Event) {}
494 /// # }
495 /// # #[derive(Eq, PartialEq, Clone, Hash)]
496 /// # struct MySocketDescriptor {}
497 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
498 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
499 /// #     fn disconnect_socket(&mut self) {}
500 /// # }
501 /// # use std::sync::{Arc, Mutex};
502 /// # use std::sync::atomic::{AtomicBool, Ordering};
503 /// # use lightning_background_processor::{process_events_async, GossipSync};
504 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
505 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
506 /// # type MyNodeSigner = dyn lightning::chain::keysinterface::NodeSigner + Send + Sync;
507 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
508 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
509 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
510 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
511 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
512 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
513 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
514 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
515 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
516 ///
517 /// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
518 ///     let background_persister = Arc::clone(&my_persister);
519 ///     let background_event_handler = Arc::clone(&my_event_handler);
520 ///     let background_chain_mon = Arc::clone(&my_chain_monitor);
521 ///     let background_chan_man = Arc::clone(&my_channel_manager);
522 ///     let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
523 ///     let background_peer_man = Arc::clone(&my_peer_manager);
524 ///     let background_logger = Arc::clone(&my_logger);
525 ///     let background_scorer = Arc::clone(&my_scorer);
526 ///
527 ///     // Setup the sleeper.
528 ///     let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
529 ///
530 ///     let sleeper = move |d| {
531 ///             let mut receiver = stop_receiver.clone();
532 ///             Box::pin(async move {
533 ///                     tokio::select!{
534 ///                             _ = tokio::time::sleep(d) => false,
535 ///                             _ = receiver.changed() => true,
536 ///                     }
537 ///             })
538 ///     };
539 ///
540 ///     let mobile_interruptable_platform = false;
541 ///
542 ///     let handle = tokio::spawn(async move {
543 ///             process_events_async(
544 ///                     background_persister,
545 ///                     |e| background_event_handler.handle_event(e),
546 ///                     background_chain_mon,
547 ///                     background_chan_man,
548 ///                     background_gossip_sync,
549 ///                     background_peer_man,
550 ///                     background_logger,
551 ///                     Some(background_scorer),
552 ///                     sleeper,
553 ///                     mobile_interruptable_platform,
554 ///                     )
555 ///                     .await
556 ///                     .expect("Failed to process events");
557 ///     });
558 ///
559 ///     // Stop the background processing.
560 ///     stop_sender.send(()).unwrap();
561 ///     handle.await.unwrap();
562 ///     # }
563 /// ```
564 #[cfg(feature = "futures")]
565 pub async fn process_events_async<
566         'a,
567         UL: 'static + Deref + Send + Sync,
568         CF: 'static + Deref + Send + Sync,
569         CW: 'static + Deref + Send + Sync,
570         T: 'static + Deref + Send + Sync,
571         ES: 'static + Deref + Send + Sync,
572         NS: 'static + Deref + Send + Sync,
573         SP: 'static + Deref + Send + Sync,
574         F: 'static + Deref + Send + Sync,
575         R: 'static + Deref + Send + Sync,
576         G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
577         L: 'static + Deref + Send + Sync,
578         P: 'static + Deref + Send + Sync,
579         Descriptor: 'static + SocketDescriptor + Send + Sync,
580         CMH: 'static + Deref + Send + Sync,
581         RMH: 'static + Deref + Send + Sync,
582         OMH: 'static + Deref + Send + Sync,
583         EventHandlerFuture: core::future::Future<Output = ()>,
584         EventHandler: Fn(Event) -> EventHandlerFuture,
585         PS: 'static + Deref + Send,
586         M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
587         CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
588         PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
589         RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
590         UMH: 'static + Deref + Send + Sync,
591         PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
592         S: 'static + Deref<Target = SC> + Send + Sync,
593         SC: for<'b> WriteableScore<'b>,
594         SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
595         Sleeper: Fn(Duration) -> SleepFuture
596 >(
597         persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
598         gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
599         sleeper: Sleeper, mobile_interruptable_platform: bool,
600 ) -> Result<(), lightning::io::Error>
601 where
602         UL::Target: 'static + UtxoLookup,
603         CF::Target: 'static + chain::Filter,
604         CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
605         T::Target: 'static + BroadcasterInterface,
606         ES::Target: 'static + EntropySource,
607         NS::Target: 'static + NodeSigner,
608         SP::Target: 'static + SignerProvider,
609         F::Target: 'static + FeeEstimator,
610         R::Target: 'static + Router,
611         L::Target: 'static + Logger,
612         P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
613         CMH::Target: 'static + ChannelMessageHandler,
614         OMH::Target: 'static + OnionMessageHandler,
615         RMH::Target: 'static + RoutingMessageHandler,
616         UMH::Target: 'static + CustomMessageHandler,
617         PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
618 {
619         let mut should_break = false;
620         let async_event_handler = |event| {
621                 let network_graph = gossip_sync.network_graph();
622                 let event_handler = &event_handler;
623                 let scorer = &scorer;
624                 async move {
625                         if let Some(network_graph) = network_graph {
626                                 handle_network_graph_update(network_graph, &event)
627                         }
628                         if let Some(ref scorer) = scorer {
629                                 update_scorer(scorer, &event);
630                         }
631                         event_handler(event).await;
632                 }
633         };
634         define_run_body!(persister,
635                 chain_monitor, chain_monitor.process_pending_events_async(async_event_handler).await,
636                 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
637                 gossip_sync, peer_manager, logger, scorer, should_break, {
638                         let fut = Selector {
639                                 a: channel_manager.get_persistable_update_future(),
640                                 b: chain_monitor.get_update_future(),
641                                 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
642                         };
643                         match fut.await {
644                                 SelectorOutput::A => true,
645                                 SelectorOutput::B => false,
646                                 SelectorOutput::C(exit) => {
647                                         should_break = exit;
648                                         false
649                                 }
650                         }
651                 }, |t| sleeper(Duration::from_secs(t)),
652                 |fut: &mut SleepFuture, _| {
653                         let mut waker = dummy_waker();
654                         let mut ctx = task::Context::from_waker(&mut waker);
655                         match core::pin::Pin::new(fut).poll(&mut ctx) {
656                                 task::Poll::Ready(exit) => { should_break = exit; true },
657                                 task::Poll::Pending => false,
658                         }
659                 }, mobile_interruptable_platform)
660 }
661
662 #[cfg(feature = "std")]
663 impl BackgroundProcessor {
664         /// Start a background thread that takes care of responsibilities enumerated in the [top-level
665         /// documentation].
666         ///
667         /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
668         /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
669         /// either [`join`] or [`stop`].
670         ///
671         /// # Data Persistence
672         ///
673         /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
674         /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
675         /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
676         /// provided implementation.
677         ///
678         /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
679         /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
680         /// See the `lightning-persister` crate for LDK's provided implementation.
681         ///
682         /// Typically, users should either implement [`Persister::persist_manager`] to never return an
683         /// error or call [`join`] and handle any error that may arise. For the latter case,
684         /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
685         ///
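        /// A hedged sketch of the latter approach (component setup elided):
        ///
        /// ```ignore
        /// loop {
        ///     let bg_processor = BackgroundProcessor::start(/* ... your components ... */);
        ///     // `join` blocks until the background thread exits, returning any persistence error.
        ///     if let Err(e) = bg_processor.join() {
        ///         // Handle the error, then fall through to restart the processor.
        ///         eprintln!("background persistence failed: {}", e);
        ///     } else {
        ///         break;
        ///     }
        /// }
        /// ```
        ///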
686         /// # Event Handling
687         ///
688         /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
689         /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
690         /// functionality implemented by other handlers.
691         /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
692         ///
693         /// # Rapid Gossip Sync
694         ///
695         /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
696         /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
697         /// until the [`RapidGossipSync`] instance completes its first sync.
698         ///
699         /// [top-level documentation]: BackgroundProcessor
700         /// [`join`]: Self::join
701         /// [`stop`]: Self::stop
702         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
703         /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
704         /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
705         /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
706         /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
707         /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
708         pub fn start<
709                 'a,
710                 UL: 'static + Deref + Send + Sync,
711                 CF: 'static + Deref + Send + Sync,
712                 CW: 'static + Deref + Send + Sync,
713                 T: 'static + Deref + Send + Sync,
714                 ES: 'static + Deref + Send + Sync,
715                 NS: 'static + Deref + Send + Sync,
716                 SP: 'static + Deref + Send + Sync,
717                 F: 'static + Deref + Send + Sync,
718                 R: 'static + Deref + Send + Sync,
719                 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
720                 L: 'static + Deref + Send + Sync,
721                 P: 'static + Deref + Send + Sync,
722                 Descriptor: 'static + SocketDescriptor + Send + Sync,
723                 CMH: 'static + Deref + Send + Sync,
724                 OMH: 'static + Deref + Send + Sync,
725                 RMH: 'static + Deref + Send + Sync,
726                 EH: 'static + EventHandler + Send,
727                 PS: 'static + Deref + Send,
728                 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::Signer, CF, T, F, L, P>> + Send + Sync,
729                 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
730                 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
731                 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
732                 UMH: 'static + Deref + Send + Sync,
733                 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, OMH, L, UMH, NS>> + Send + Sync,
734                 S: 'static + Deref<Target = SC> + Send + Sync,
735                 SC: for <'b> WriteableScore<'b>,
736         >(
737                 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
738                 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
739         ) -> Self
740         where
741                 UL::Target: 'static + UtxoLookup,
742                 CF::Target: 'static + chain::Filter,
743                 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
744                 T::Target: 'static + BroadcasterInterface,
745                 ES::Target: 'static + EntropySource,
746                 NS::Target: 'static + NodeSigner,
747                 SP::Target: 'static + SignerProvider,
748                 F::Target: 'static + FeeEstimator,
749                 R::Target: 'static + Router,
750                 L::Target: 'static + Logger,
751                 P::Target: 'static + Persist<<SP::Target as SignerProvider>::Signer>,
752                 CMH::Target: 'static + ChannelMessageHandler,
753                 OMH::Target: 'static + OnionMessageHandler,
754                 RMH::Target: 'static + RoutingMessageHandler,
755                 UMH::Target: 'static + CustomMessageHandler,
756                 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
757         {
758                 let stop_thread = Arc::new(AtomicBool::new(false));
759                 let stop_thread_clone = stop_thread.clone();
760                 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
761                         let event_handler = |event| {
762                                 let network_graph = gossip_sync.network_graph();
763                                 if let Some(network_graph) = network_graph {
764                                         handle_network_graph_update(network_graph, &event)
765                                 }
766                                 if let Some(ref scorer) = scorer {
767                                         update_scorer(scorer, &event);
768                                 }
769                                 event_handler.handle_event(event);
770                         };
771                         define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
772                                 channel_manager, channel_manager.process_pending_events(&event_handler),
773                                 gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
774                                 Sleeper::from_two_futures(
775                                         channel_manager.get_persistable_update_future(),
776                                         chain_monitor.get_update_future()
777                                 ).wait_timeout(Duration::from_millis(100)),
778                                 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
779                 });
780                 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
781         }
782
783         /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
784         /// [`ChannelManager`].
785         ///
786         /// # Panics
787         ///
788         /// This function panics if the background thread has panicked, such as while persisting or
789         /// handling events.
790         ///
791         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
792         pub fn join(mut self) -> Result<(), std::io::Error> {
793                 assert!(self.thread_handle.is_some());
794                 self.join_thread()
795         }
796
797         /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
798         /// [`ChannelManager`].
799         ///
800         /// # Panics
801         ///
802         /// This function panics if the background thread has panicked, such as while persisting or
803         /// handling events.
804         ///
805         /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
806         pub fn stop(mut self) -> Result<(), std::io::Error> {
807                 assert!(self.thread_handle.is_some());
808                 self.stop_and_join_thread()
809         }
810
811         fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
812                 self.stop_thread.store(true, Ordering::Release);
813                 self.join_thread()
814         }
815
816         fn join_thread(&mut self) -> Result<(), std::io::Error> {
817                 match self.thread_handle.take() {
818                         Some(handle) => handle.join().unwrap(),
819                         None => Ok(()),
820                 }
821         }
822 }
823
824 #[cfg(feature = "std")]
825 impl Drop for BackgroundProcessor {
826         fn drop(&mut self) {
827                 self.stop_and_join_thread().unwrap();
828         }
829 }
830
831 #[cfg(all(feature = "std", test))]
832 mod tests {
833         use bitcoin::blockdata::block::BlockHeader;
834         use bitcoin::blockdata::constants::genesis_block;
835         use bitcoin::blockdata::locktime::PackedLockTime;
836         use bitcoin::blockdata::transaction::{Transaction, TxOut};
837         use bitcoin::network::constants::Network;
838         use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
839         use lightning::chain::{BestBlock, Confirm, chainmonitor};
840         use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
841         use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
842         use lightning::chain::transaction::OutPoint;
843         use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
844         use lightning::{get_event_msg, get_event};
845         use lightning::ln::PaymentHash;
846         use lightning::ln::channelmanager;
847         use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
848         use lightning::ln::features::{ChannelFeatures, NodeFeatures};
849         use lightning::ln::msgs::{ChannelMessageHandler, Init};
850         use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
851         use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
852         use lightning::routing::router::{DefaultRouter, RouteHop};
853         use lightning::routing::scoring::{ChannelUsage, Score};
854         use lightning::util::config::UserConfig;
855         use lightning::util::ser::Writeable;
856         use lightning::util::test_utils;
857         use lightning::util::persist::KVStorePersister;
858         use lightning_persister::FilesystemPersister;
859         use std::collections::VecDeque;
860         use std::fs;
861         use std::path::PathBuf;
862         use std::sync::{Arc, Mutex};
863         use std::sync::mpsc::SyncSender;
864         use std::time::Duration;
865         use bitcoin::hashes::Hash;
866         use bitcoin::TxMerkleNode;
867         use lightning_rapid_gossip_sync::RapidGossipSync;
868         use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
869
870         const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
871
872         #[derive(Clone, Hash, PartialEq, Eq)]
873         struct TestDescriptor{}
874         impl SocketDescriptor for TestDescriptor {
875                 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
876                         0
877                 }
878
879                 fn disconnect_socket(&mut self) {}
880         }
881
882         type ChannelManager = channelmanager::ChannelManager<Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<DefaultRouter< Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<Mutex<TestScorer>>>>, Arc<test_utils::TestLogger>>;
883
884         type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
885
886         type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
887         type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
888
889         struct Node {
890                 node: Arc<ChannelManager>,
891                 p2p_gossip_sync: PGS,
892                 rapid_gossip_sync: RGS,
893                 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
894                 chain_monitor: Arc<ChainMonitor>,
895                 persister: Arc<FilesystemPersister>,
896                 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
897                 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
898                 logger: Arc<test_utils::TestLogger>,
899                 best_block: BestBlock,
900                 scorer: Arc<Mutex<TestScorer>>,
901         }
902
903         impl Node {
904                 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
905                         GossipSync::P2P(self.p2p_gossip_sync.clone())
906                 }
907
908                 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
909                         GossipSync::Rapid(self.rapid_gossip_sync.clone())
910                 }
911
912                 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
913                         GossipSync::None
914                 }
915         }
916
917         impl Drop for Node {
918                 fn drop(&mut self) {
919                         let data_dir = self.persister.get_data_dir();
920                         match fs::remove_dir_all(data_dir.clone()) {
921                                 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
922                                 _ => {}
923                         }
924                 }
925         }
926
927         struct Persister {
928                 graph_error: Option<(std::io::ErrorKind, &'static str)>,
929                 graph_persistence_notifier: Option<SyncSender<()>>,
930                 manager_error: Option<(std::io::ErrorKind, &'static str)>,
931                 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
932                 filesystem_persister: FilesystemPersister,
933         }
934
935         impl Persister {
936                 fn new(data_dir: String) -> Self {
937                         let filesystem_persister = FilesystemPersister::new(data_dir);
938                         Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
939                 }
940
941                 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
942                         Self { graph_error: Some((error, message)), ..self }
943                 }
944
945                 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
946                         Self { graph_persistence_notifier: Some(sender), ..self }
947                 }
948
949                 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
950                         Self { manager_error: Some((error, message)), ..self }
951                 }
952
953                 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
954                         Self { scorer_error: Some((error, message)), ..self }
955                 }
956         }
957
958         impl KVStorePersister for Persister {
959                 fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
960                         if key == "manager" {
961                                 if let Some((error, message)) = self.manager_error {
962                                         return Err(std::io::Error::new(error, message))
963                                 }
964                         }
965
966                         if key == "network_graph" {
967                                 if let Some(sender) = &self.graph_persistence_notifier {
968                                         match sender.send(()) {
969                                                 Ok(()) => {},
970                                                 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
971                                         }
972                                 };
973
974                                 if let Some((error, message)) = self.graph_error {
975                                         return Err(std::io::Error::new(error, message))
976                                 }
977                         }
978
979                         if key == "scorer" {
980                                 if let Some((error, message)) = self.scorer_error {
981                                         return Err(std::io::Error::new(error, message))
982                                 }
983                         }
984
985                         self.filesystem_persister.persist(key, object)
986                 }
987         }
988
989         struct TestScorer {
990                 event_expectations: Option<VecDeque<TestResult>>,
991         }
992
993         #[derive(Debug)]
994         enum TestResult {
995                 PaymentFailure { path: Vec<RouteHop>, short_channel_id: u64 },
996                 PaymentSuccess { path: Vec<RouteHop> },
997                 ProbeFailure { path: Vec<RouteHop> },
998                 ProbeSuccess { path: Vec<RouteHop> },
999         }
1000
1001         impl TestScorer {
1002                 fn new() -> Self {
1003                         Self { event_expectations: None }
1004                 }
1005
1006                 fn expect(&mut self, expectation: TestResult) {
1007                         self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1008                 }
1009         }
1010
1011         impl lightning::util::ser::Writeable for TestScorer {
1012                 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1013         }
1014
1015         impl Score for TestScorer {
1016                 fn channel_penalty_msat(
1017                         &self, _short_channel_id: u64, _source: &NodeId, _target: &NodeId, _usage: ChannelUsage
1018                 ) -> u64 { unimplemented!(); }
1019
1020                 fn payment_path_failed(&mut self, actual_path: &[&RouteHop], actual_short_channel_id: u64) {
1021                         if let Some(expectations) = &mut self.event_expectations {
1022                                 match expectations.pop_front().unwrap() {
1023                                         TestResult::PaymentFailure { path, short_channel_id } => {
1024                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
1025                                                 assert_eq!(actual_short_channel_id, short_channel_id);
1026                                         },
1027                                         TestResult::PaymentSuccess { path } => {
1028                                                 panic!("Unexpected successful payment path: {:?}", path)
1029                                         },
1030                                         TestResult::ProbeFailure { path } => {
1031                                                 panic!("Unexpected probe failure: {:?}", path)
1032                                         },
1033                                         TestResult::ProbeSuccess { path } => {
1034                                                 panic!("Unexpected probe success: {:?}", path)
1035                                         }
1036                                 }
1037                         }
1038                 }
1039
1040                 fn payment_path_successful(&mut self, actual_path: &[&RouteHop]) {
1041                         if let Some(expectations) = &mut self.event_expectations {
1042                                 match expectations.pop_front().unwrap() {
1043                                         TestResult::PaymentFailure { path, .. } => {
1044                                                 panic!("Unexpected payment path failure: {:?}", path)
1045                                         },
1046                                         TestResult::PaymentSuccess { path } => {
1047                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
1048                                         },
1049                                         TestResult::ProbeFailure { path } => {
1050                                                 panic!("Unexpected probe failure: {:?}", path)
1051                                         },
1052                                         TestResult::ProbeSuccess { path } => {
1053                                                 panic!("Unexpected probe success: {:?}", path)
1054                                         }
1055                                 }
1056                         }
1057                 }
1058
1059                 fn probe_failed(&mut self, actual_path: &[&RouteHop], _: u64) {
1060                         if let Some(expectations) = &mut self.event_expectations {
1061                                 match expectations.pop_front().unwrap() {
1062                                         TestResult::PaymentFailure { path, .. } => {
1063                                                 panic!("Unexpected payment path failure: {:?}", path)
1064                                         },
1065                                         TestResult::PaymentSuccess { path } => {
1066                                                 panic!("Unexpected payment path success: {:?}", path)
1067                                         },
1068                                         TestResult::ProbeFailure { path } => {
1069                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
1070                                         },
1071                                         TestResult::ProbeSuccess { path } => {
1072                                                 panic!("Unexpected probe success: {:?}", path)
1073                                         }
1074                                 }
1075                         }
1076                 }
1077                 fn probe_successful(&mut self, actual_path: &[&RouteHop]) {
1078                         if let Some(expectations) = &mut self.event_expectations {
1079                                 match expectations.pop_front().unwrap() {
1080                                         TestResult::PaymentFailure { path, .. } => {
1081                                                 panic!("Unexpected payment path failure: {:?}", path)
1082                                         },
1083                                         TestResult::PaymentSuccess { path } => {
1084                                                 panic!("Unexpected payment path success: {:?}", path)
1085                                         },
1086                                         TestResult::ProbeFailure { path } => {
1087                                                 panic!("Unexpected probe failure: {:?}", path)
1088                                         },
1089                                         TestResult::ProbeSuccess { path } => {
1090                                                 assert_eq!(actual_path, &path.iter().collect::<Vec<_>>()[..]);
1091                                         }
1092                                 }
1093                         }
1094                 }
1095         }
1096
1097         impl Drop for TestScorer {
1098                 fn drop(&mut self) {
1099                         if std::thread::panicking() {
1100                                 return;
1101                         }
1102
1103                         if let Some(event_expectations) = &self.event_expectations {
1104                                 if !event_expectations.is_empty() {
1105                                         panic!("Unsatisfied event expectations: {:?}", event_expectations);
1106                                 }
1107                         }
1108                 }
1109         }
1110
1111         fn get_full_filepath(filepath: String, filename: String) -> String {
1112                 let mut path = PathBuf::from(filepath);
1113                 path.push(filename);
1114                 path.to_str().unwrap().to_string()
1115         }
1116
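             // Builds `num_nodes` test nodes, each with its own `ChannelManager`, `ChainMonitor`,
             // `PeerManager`, gossip syncs, and a `FilesystemPersister` writing to a directory derived from
             // `persist_dir`, and then connects every pair of nodes as peers.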
1117         fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
1118                 let network = Network::Testnet;
1119                 let mut nodes = Vec::new();
1120                 for i in 0..num_nodes {
1121                         let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1122                         let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1123                         let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1124                         let genesis_block = genesis_block(network);
1125                         let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1126                         let scorer = Arc::new(Mutex::new(TestScorer::new()));
1127                         let seed = [i as u8; 32];
1128                         let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone()));
1129                         let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
1130                         let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
1131                         let now = Duration::from_secs(genesis_block.header.time as u64);
1132                         let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1133                         let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
1134                         let best_block = BestBlock::from_network(network);
1135                         let params = ChainParameters { network, best_block };
1136                         let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params));
1137                         let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1138                         let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1139                         let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()), onion_message_handler: IgnoringMessageHandler{}};
1140                         let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), IgnoringMessageHandler{}, keys_manager.clone()));
1141                         let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
1142                         nodes.push(node);
1143                 }
1144
1145                 for i in 0..num_nodes {
1146                         for j in (i+1)..num_nodes {
1147                                 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: nodes[j].node.init_features(), remote_network_address: None }, true).unwrap();
1148                                 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: nodes[i].node.init_features(), remote_network_address: None }, false).unwrap();
1149                         }
1150                 }
1151
1152                 nodes
1153         }
1154
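             // `open_channel!` drives the full funding flow between two nodes (open/accept, funding
             // created/signed) and returns the funding transaction, while `begin_open_channel!` only
             // exchanges the initial open/accept messages, leaving node A's `FundingGenerationReady`
             // event pending for the caller (or a background event handler) to consume.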
1155         macro_rules! open_channel {
1156                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1157                         begin_open_channel!($node_a, $node_b, $channel_value);
1158                         let events = $node_a.node.get_and_clear_pending_events();
1159                         assert_eq!(events.len(), 1);
1160                         let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1161                         $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1162                         $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1163                         get_event!($node_b, Event::ChannelPending);
1164                         $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1165                         get_event!($node_a, Event::ChannelPending);
1166                         tx
1167                 }}
1168         }
1169
1170         macro_rules! begin_open_channel {
1171                 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1172                         $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
1173                         $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1174                         $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1175                 }}
1176         }
1177
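             // Checks that a `FundingGenerationReady` event matches the expected channel value and user
             // channel id, and builds a dummy funding transaction paying the requested output script.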
1178         macro_rules! handle_funding_generation_ready {
1179                 ($event: expr, $channel_value: expr) => {{
1180                         match $event {
1181                                 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1182                                         assert_eq!(channel_value_satoshis, $channel_value);
1183                                         assert_eq!(user_channel_id, 42);
1184
1185                                         let tx = Transaction { version: 1 as i32, lock_time: PackedLockTime(0), input: Vec::new(), output: vec![TxOut {
1186                                                 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1187                                         }]};
1188                                         (temporary_channel_id, tx)
1189                                 },
1190                                 _ => panic!("Unexpected event"),
1191                         }
1192                 }}
1193         }
1194
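             // Connects `depth` blocks on top of the node's current best block, confirming `tx` in the
             // first of them and notifying both the `ChannelManager` and the `ChainMonitor`.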
1195         fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1196                 for i in 1..=depth {
1197                         let prev_blockhash = node.best_block.block_hash();
1198                         let height = node.best_block.height() + 1;
1199                         let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 };
1200                         let txdata = vec![(0, tx)];
1201                         node.best_block = BestBlock::new(header.block_hash(), height);
1202                         match i {
1203                                 1 => {
1204                                         node.node.transactions_confirmed(&header, &txdata, height);
1205                                         node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1206                                 },
1207                                 x if x == depth => {
1208                                         node.node.best_block_updated(&header, height);
1209                                         node.chain_monitor.best_block_updated(&header, height);
1210                                 },
1211                                 _ => {},
1212                         }
1213                 }
1214         }
1215         fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1216                 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1217         }
1218
1219         #[test]
1220         fn test_background_processor() {
1221                 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
1222                 // updates. Also test that when new updates are available, the manager signals that it needs
1223                 // re-persistence and is successfully re-persisted.
1224                 let nodes = create_nodes(2, "test_background_processor".to_string());
1225
1226                 // Go through the channel creation process so that each node has something to persist. Since
1227                 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1228                 // avoid a race with processing events.
1229                 let tx = open_channel!(nodes[0], nodes[1], 100000);
1230
1231                 // Initiate the background processors to watch each node.
1232                 let data_dir = nodes[0].persister.get_data_dir();
1233                 let persister = Arc::new(Persister::new(data_dir));
1234                 let event_handler = |_: _| {};
1235                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1236
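                     // Polls until the bytes persisted at `$filepath` match the current serialization of
                     // `$node`, panicking only if serializing `$node` itself fails.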
1237                 macro_rules! check_persisted_data {
1238                         ($node: expr, $filepath: expr) => {
1239                                 let mut expected_bytes = Vec::new();
1240                                 loop {
1241                                         expected_bytes.clear();
1242                                         match $node.write(&mut expected_bytes) {
1243                                                 Ok(()) => {
1244                                                         match std::fs::read($filepath) {
1245                                                                 Ok(bytes) => {
1246                                                                         if bytes == expected_bytes {
1247                                                                                 break
1248                                                                         } else {
1249                                                                                 continue
1250                                                                         }
1251                                                                 },
1252                                                                 Err(_) => continue
1253                                                         }
1254                                                 },
1255                                                 Err(e) => panic!("Unexpected error: {}", e)
1256                                         }
1257                                 }
1258                         }
1259                 }
1260
1261                 // Check that the initial channel manager data is persisted as expected.
1262                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
1263                 check_persisted_data!(nodes[0].node, filepath.clone());
1264
1265                 loop {
1266                         if !nodes[0].node.get_persistence_condvar_value() { break }
1267                 }
1268
1269                 // Force-close the channel.
1270                 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1271
1272                 // Check that the force-close updates are persisted.
1273                 check_persisted_data!(nodes[0].node, filepath.clone());
1274                 loop {
1275                         if !nodes[0].node.get_persistence_condvar_value() { break }
1276                 }
1277
1278                 // Check network graph is persisted
1279                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
1280                 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1281
1282                 // Check scorer is persisted
1283                 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
1284                 check_persisted_data!(nodes[0].scorer, filepath.clone());
1285
1286                 if !std::thread::panicking() {
1287                         bg_processor.stop().unwrap();
1288                 }
1289         }
1290
1291         #[test]
1292         fn test_timer_tick_called() {
1293                 // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`, that
1294                 // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and that
1295                 // `PeerManager::timer_tick_occurred` is called every `PING_TIMER`.
1296                 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
1297                 let data_dir = nodes[0].persister.get_data_dir();
1298                 let persister = Arc::new(Persister::new(data_dir));
1299                 let event_handler = |_: _| {};
1300                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1301                 loop {
1302                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1303                         let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
1304                         let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
1305                         let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
1306                         if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
1307                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
1308                                 log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
1309                                 break
1310                         }
1311                 }
1312
1313                 if !std::thread::panicking() {
1314                         bg_processor.stop().unwrap();
1315                 }
1316         }
1317
1318         #[test]
1319         fn test_channel_manager_persist_error() {
1320                 // Test that if we encounter an error during manager persistence, the background thread
1321                 // returns the error when joined.
1321                 let nodes = create_nodes(2, "test_persist_error".to_string());
1322                 open_channel!(nodes[0], nodes[1], 100000);
1323
1324                 let data_dir = nodes[0].persister.get_data_dir();
1325                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1326                 let event_handler = |_: _| {};
1327                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1328                 match bg_processor.join() {
1329                         Ok(_) => panic!("Expected error persisting manager"),
1330                         Err(e) => {
1331                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1332                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1333                         },
1334                 }
1335         }
1336
1337         #[tokio::test]
1338         #[cfg(feature = "futures")]
1339         async fn test_channel_manager_persist_error_async() {
1340                 // Test that if we encounter an error during manager persistence, the future resolves with
1341                 // the error.
1341                 let nodes = create_nodes(2, "test_persist_error_async".to_string());
1342                 open_channel!(nodes[0], nodes[1], 100000);
1343
1344                 let data_dir = nodes[0].persister.get_data_dir();
1345                 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1346
1347                 let bp_future = super::process_events_async(
1348                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1349                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1350                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1351                                 Box::pin(async move {
1352                                         tokio::time::sleep(dur).await;
1353                                         false // Never exit
1354                                 })
1355                         }, false,
1356                 );
1357                 match bp_future.await {
1358                         Ok(_) => panic!("Expected error persisting manager"),
1359                         Err(e) => {
1360                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1361                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1362                         },
1363                 }
1364         }
1365
1366         #[test]
1367         fn test_network_graph_persist_error() {
1368                 // Test that if we encounter an error during network graph persistence, an error gets returned.
1369                 let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
1370                 let data_dir = nodes[0].persister.get_data_dir();
1371                 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1372                 let event_handler = |_: _| {};
1373                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1374
1375                 match bg_processor.stop() {
1376                         Ok(_) => panic!("Expected error persisting network graph"),
1377                         Err(e) => {
1378                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1379                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1380                         },
1381                 }
1382         }
1383
1384         #[test]
1385         fn test_scorer_persist_error() {
1386                 // Test that if we encounter an error during scorer persistence, an error gets returned.
1387                 let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
1388                 let data_dir = nodes[0].persister.get_data_dir();
1389                 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1390                 let event_handler = |_: _| {};
1391                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1392
1393                 match bg_processor.stop() {
1394                         Ok(_) => panic!("Expected error persisting scorer"),
1395                         Err(e) => {
1396                                 assert_eq!(e.kind(), std::io::ErrorKind::Other);
1397                                 assert_eq!(e.get_ref().unwrap().to_string(), "test");
1398                         },
1399                 }
1400         }
1401
1402         #[test]
1403         fn test_background_event_handling() {
1404                 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
1405                 let channel_value = 100000;
1406                 let data_dir = nodes[0].persister.get_data_dir();
1407                 let persister = Arc::new(Persister::new(data_dir.clone()));
1408
1409                 // Set up a background event handler for FundingGenerationReady events.
1410                 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1411                 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1412                 let event_handler = move |event: Event| match event {
1413                         Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1414                         Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1415                         Event::ChannelReady { .. } => {},
1416                         _ => panic!("Unexpected event: {:?}", event),
1417                 };
1418
1419                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1420
1421                 // Open a channel and check that the FundingGenerationReady event was handled.
1422                 begin_open_channel!(nodes[0], nodes[1], channel_value);
1423                 let (temporary_channel_id, funding_tx) = funding_generation_recv
1424                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1425                         .expect("FundingGenerationReady not handled within deadline");
1426                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1427                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1428                 get_event!(nodes[1], Event::ChannelPending);
1429                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1430                 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1431                         .expect("ChannelPending not handled within deadline");
1432
1433                 // Confirm the funding transaction.
1434                 confirm_transaction(&mut nodes[0], &funding_tx);
1435                 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1436                 confirm_transaction(&mut nodes[1], &funding_tx);
1437                 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1438                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1439                 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1440                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1441                 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1442
1443                 if !std::thread::panicking() {
1444                         bg_processor.stop().unwrap();
1445                 }
1446
1447                 // Set up a background event handler for SpendableOutputs events.
1448                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1449                 let event_handler = move |event: Event| match event {
1450                         Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1451                         Event::ChannelReady { .. } => {},
1452                         Event::ChannelClosed { .. } => {},
1453                         _ => panic!("Unexpected event: {:?}", event),
1454                 };
1455                 let persister = Arc::new(Persister::new(data_dir));
1456                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1457
1458                 // Force close the channel and check that the SpendableOutputs event was handled.
1459                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1460                 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1461                 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
1462
1463                 let event = receiver
1464                         .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1465                         .expect("Events not handled within deadline");
1466                 match event {
1467                         Event::SpendableOutputs { .. } => {},
1468                         _ => panic!("Unexpected event: {:?}", event),
1469                 }
1470
1471                 if !std::thread::panicking() {
1472                         bg_processor.stop().unwrap();
1473                 }
1474         }
1475
1476         #[test]
1477         fn test_scorer_persistence() {
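                     // Test that the background processor persists the scorer even when no scoring-relevant
                     // events are handled.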
1478                 let nodes = create_nodes(2, "test_scorer_persistence".to_string());
1479                 let data_dir = nodes[0].persister.get_data_dir();
1480                 let persister = Arc::new(Persister::new(data_dir));
1481                 let event_handler = |_: _| {};
1482                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1483
1484                 loop {
1485                         let log_entries = nodes[0].logger.lines.lock().unwrap();
1486                         let expected_log = "Persisting scorer".to_string();
1487                         if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
1488                                 break
1489                         }
1490                 }
1491
1492                 if !std::thread::panicking() {
1493                         bg_processor.stop().unwrap();
1494                 }
1495         }
1496
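             // Shared body for the sync and async variants below: seeds the graph with a (stale) partial
             // channel announcement, lets the background processor loop a couple of times, applies a rapid
             // gossip sync update, and then verifies that the graph is only pruned (and re-persisted) once
             // the gossip sync has completed.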
1497         macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1498                 ($nodes: expr, $receive: expr, $sleep: expr) => {
1499                         let features = ChannelFeatures::empty();
1500                         $nodes[0].network_graph.add_channel_from_partial_announcement(
1501                                 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1502                         ).expect("Failed to update channel from partial announcement");
1503                         let original_graph_description = $nodes[0].network_graph.to_string();
1504                         assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1505                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
1506
1507                         loop {
1508                                 $sleep;
1509                                 let log_entries = $nodes[0].logger.lines.lock().unwrap();
1510                                 let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
1511                                 if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
1512                                         .unwrap_or(&0) > 1
1513                                 {
1514                                         // Wait until the loop has gone around at least twice.
1515                                         break
1516                                 }
1517                         }
1518
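                             // Raw rapid gossip sync data announcing two channels; applying it below marks the
                             // gossip sync as complete.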
1519                         let initialization_input = vec![
1520                                 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1521                                 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1522                                 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1523                                 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1524                                 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1525                                 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1526                                 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1527                                 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1528                                 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1529                                 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1530                                 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1531                                 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1532                                 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1533                         ];
1534                         $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1535
1536                         // This should have added two channels and pruned the previous one.
1537                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1538
1539                         $receive.expect("Network graph not pruned within deadline");
1540
1541                         // All channels should now be pruned.
1542                         assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
1543                 }
1544         }
1545
1546         #[test]
1547         fn test_not_pruning_network_graph_until_graph_sync_completion() {
1548                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1549
1550                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
1551                 let data_dir = nodes[0].persister.get_data_dir();
1552                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1553
1554                 let event_handler = |_: _| {};
1555                 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1556
1557                 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1558                         receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1559                         std::thread::sleep(Duration::from_millis(1)));
1560
1561                 background_processor.stop().unwrap();
1562         }
1563
1564         #[tokio::test]
1565         #[cfg(feature = "futures")]
1566         async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1567                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1568
1569                 let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
1570                 let data_dir = nodes[0].persister.get_data_dir();
1571                 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1572
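                     // The sleeper closure passed to `process_events_async` doubles as the exit signal: its
                     // future resolves to `false` when the timer fires and to `true` once `exit_sender`
                     // notifies the watch channel, telling the background processor to shut down.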
1573                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1574                 let bp_future = super::process_events_async(
1575                         persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1576                         nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1577                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1578                                 let mut exit_receiver = exit_receiver.clone();
1579                                 Box::pin(async move {
1580                                         tokio::select! {
1581                                                 _ = tokio::time::sleep(dur) => false,
1582                                                 _ = exit_receiver.changed() => true,
1583                                         }
1584                                 })
1585                         }, false,
1586                 );
1587
1588                 let t1 = tokio::spawn(bp_future);
1589                 let t2 = tokio::spawn(async move {
1590                         do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
1591                                 let mut i = 0;
1592                                 loop {
1593                                         tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
1594                                         if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
1595                                         assert!(i < 5);
1596                                         i += 1;
1597                                 }
1598                         }, tokio::time::sleep(Duration::from_millis(1)).await);
1599                         exit_sender.send(()).unwrap();
1600                 });
1601                 let (r1, r2) = tokio::join!(t1, t2);
1602                 r1.unwrap().unwrap();
1603                 r2.unwrap()
1604         }
1605
1606         macro_rules! do_test_payment_path_scoring {
1607                 ($nodes: expr, $receive: expr) => {
1608                         // Ensure that we update the scorer when relevant events are processed. In this case, we ensure
1609                         // that we update the scorer upon a payment path succeeding (note that the channel must be
1610                         // public or else we won't score it).
1611                         // An event handler that forwards the payment and probe events must be hooked up to a
1612                         // running background processor so the scorer is updated as each event is handled.
1613                         let scored_scid = 4242;
1614                         let secp_ctx = Secp256k1::new();
1615                         let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1616                         let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
1617
1618                         let path = vec![RouteHop {
1619                                 pubkey: node_1_id,
1620                                 node_features: NodeFeatures::empty(),
1621                                 short_channel_id: scored_scid,
1622                                 channel_features: ChannelFeatures::empty(),
1623                                 fee_msat: 0,
1624                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
1625                         }];
1626
1627                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
1628                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1629                                 payment_id: None,
1630                                 payment_hash: PaymentHash([42; 32]),
1631                                 payment_failed_permanently: false,
1632                                 failure: PathFailure::OnPath { network_update: None },
1633                                 path: path.clone(),
1634                                 short_channel_id: Some(scored_scid),
1635                         });
1636                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1637                         match event {
1638                                 Event::PaymentPathFailed { .. } => {},
1639                                 _ => panic!("Unexpected event"),
1640                         }
1641
1642                         // Ensure we'll score payments that were explicitly failed back by the destination as
1643                         // ProbeSuccess.
1644                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1645                         $nodes[0].node.push_pending_event(Event::PaymentPathFailed {
1646                                 payment_id: None,
1647                                 payment_hash: PaymentHash([42; 32]),
1648                                 payment_failed_permanently: true,
1649                                 failure: PathFailure::OnPath { network_update: None },
1650                                 path: path.clone(),
1651                                 short_channel_id: None,
1652                         });
1653                         let event = $receive.expect("PaymentPathFailed not handled within deadline");
1654                         match event {
1655                                 Event::PaymentPathFailed { .. } => {},
1656                                 _ => panic!("Unexpected event"),
1657                         }
1658
1659                         $nodes[0].scorer.lock().unwrap().expect(TestResult::PaymentSuccess { path: path.clone() });
1660                         $nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
1661                                 payment_id: PaymentId([42; 32]),
1662                                 payment_hash: None,
1663                                 path: path.clone(),
1664                         });
1665                         let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
1666                         match event {
1667                                 Event::PaymentPathSuccessful { .. } => {},
1668                                 _ => panic!("Unexpected event"),
1669                         }
1670
1671                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeSuccess { path: path.clone() });
1672                         $nodes[0].node.push_pending_event(Event::ProbeSuccessful {
1673                                 payment_id: PaymentId([42; 32]),
1674                                 payment_hash: PaymentHash([42; 32]),
1675                                 path: path.clone(),
1676                         });
1677                         let event = $receive.expect("ProbeSuccessful not handled within deadline");
1678                         match event {
1679                                 Event::ProbeSuccessful  { .. } => {},
1680                                 _ => panic!("Unexpected event"),
1681                         }
1682
1683                         $nodes[0].scorer.lock().unwrap().expect(TestResult::ProbeFailure { path: path.clone() });
1684                         $nodes[0].node.push_pending_event(Event::ProbeFailed {
1685                                 payment_id: PaymentId([42; 32]),
1686                                 payment_hash: PaymentHash([42; 32]),
1687                                 path,
1688                                 short_channel_id: Some(scored_scid),
1689                         });
1690                         let event = $receive.expect("ProbeFailure not handled within deadline");
1691                         match event {
1692                                 Event::ProbeFailed { .. } => {},
1693                                 _ => panic!("Unexpected event"),
1694                         }
1695                 }
1696         }
1697
1698         #[test]
1699         fn test_payment_path_scoring() {
1700                 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1701                 let event_handler = move |event: Event| match event {
1702                         Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1703                         Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1704                         Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1705                         Event::ProbeFailed { .. } => sender.send(event).unwrap(),
1706                         _ => panic!("Unexpected event: {:?}", event),
1707                 };
1708
1709                 let nodes = create_nodes(1, "test_payment_path_scoring".to_string());
1710                 let data_dir = nodes[0].persister.get_data_dir();
1711                 let persister = Arc::new(Persister::new(data_dir));
1712                 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1713
1714                 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
1715
1716                 if !std::thread::panicking() {
1717                         bg_processor.stop().unwrap();
1718                 }
1719         }
1720
1721         #[tokio::test]
1722         #[cfg(feature = "futures")]
1723         async fn test_payment_path_scoring_async() {
1724                 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
1725                 let event_handler = move |event: Event| {
1726                         let sender_ref = sender.clone();
1727                         async move {
1728                                 match event {
1729                                         Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
1730                                         Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1731                                         Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
1732                                         Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
1733                                         _ => panic!("Unexpected event: {:?}", event),
1734                                 }
1735                         }
1736                 };
1737
1738                 let nodes = create_nodes(1, "test_payment_path_scoring_async".to_string());
1739                 let data_dir = nodes[0].persister.get_data_dir();
1740                 let persister = Arc::new(Persister::new(data_dir));
1741
1742                 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1743
1744                 let bp_future = super::process_events_async(
1745                         persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1746                         nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1747                         Some(nodes[0].scorer.clone()), move |dur: Duration| {
1748                                 let mut exit_receiver = exit_receiver.clone();
1749                                 Box::pin(async move {
1750                                         tokio::select! {
1751                                                 _ = tokio::time::sleep(dur) => false,
1752                                                 _ = exit_receiver.changed() => true,
1753                                         }
1754                                 })
1755                         }, false,
1756                 );
1757                 let t1 = tokio::spawn(bp_future);
1758                 let t2 = tokio::spawn(async move {
1759                         do_test_payment_path_scoring!(nodes, receiver.recv().await);
1760                         exit_sender.send(()).unwrap();
1761                 });
1762
1763                 let (r1, r2) = tokio::join!(t1, t2);
1764                 r1.unwrap().unwrap();
1765                 r2.unwrap()
1766         }
1767 }