1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
5 #![deny(rustdoc::broken_intra_doc_links)]
6 #![deny(rustdoc::private_intra_doc_links)]
9 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#[cfg(any(test, feature = "std"))]
extern crate core;

#[cfg(not(feature = "std"))]
extern crate alloc;
21 #[macro_use] extern crate lightning;
22 extern crate lightning_rapid_gossip_sync;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
26 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
27 use lightning::events::{Event, PathFailure};
28 #[cfg(feature = "std")]
29 use lightning::events::EventHandler;
30 #[cfg(feature = "std")]
31 use lightning::events::EventsProvider;
33 use lightning::ln::channelmanager::AChannelManager;
34 use lightning::ln::msgs::OnionMessageHandler;
35 use lightning::onion_message::messenger::AOnionMessenger;
36 use lightning::ln::peer_handler::APeerManager;
37 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
38 use lightning::routing::utxo::UtxoLookup;
39 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
40 use lightning::util::logger::Logger;
41 use lightning::util::persist::Persister;
42 #[cfg(feature = "std")]
43 use lightning::util::wakers::Sleeper;
44 use lightning_rapid_gossip_sync::RapidGossipSync;
use core::ops::Deref;
use core::time::Duration;
#[cfg(feature = "std")]
use std::sync::Arc;
#[cfg(feature = "std")]
use core::sync::atomic::{AtomicBool, Ordering};
53 #[cfg(feature = "std")]
54 use std::thread::{self, JoinHandle};
55 #[cfg(feature = "std")]
56 use std::time::Instant;
58 #[cfg(not(feature = "std"))]
59 use alloc::boxed::Box;
61 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
62 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
63 /// responsibilities are:
64 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
65 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
66 /// writing it to disk/backups by invoking the callback given to it at startup.
67 /// [`ChannelManager`] persistence should be done in the background.
68 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
69 /// and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
70 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
71 /// [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
/// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
/// upon as doing so may result in high latency.
78 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
79 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
80 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
81 /// unilateral chain closure fees are at risk.
83 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
84 /// [`ChannelManager::timer_tick_occurred`]: lightning::ln::channelmanager::ChannelManager::timer_tick_occurred
85 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
86 /// [`Event`]: lightning::events::Event
87 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
88 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
89 #[cfg(feature = "std")]
90 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
91 pub struct BackgroundProcessor {
92 stop_thread: Arc<AtomicBool>,
93 thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
101 #[cfg(all(not(test), not(debug_assertions)))]
102 const PING_TIMER: u64 = 10;
103 /// Signature operations take a lot longer without compiler optimisations.
104 /// Increasing the ping timer allows for this but slower devices will be disconnected if the
105 /// timeout is reached.
106 #[cfg(all(not(test), debug_assertions))]
107 const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;
#[cfg(not(test))]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
#[cfg(test)]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
116 /// Prune the network graph of stale entries hourly.
117 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 5;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;
#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;
134 #[cfg(feature = "futures")]
135 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
136 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
137 #[cfg(feature = "futures")]
138 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
139 min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
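// Note: `FASTEST_TIMER` is simply the smallest of the intervals defined above. The async
// processing loop in `process_events_async` below uses it (in seconds) as the upper bound on how
// long it sleeps between iterations when `mobile_interruptable_platform` is not set, so that no
// timer fires later than intended.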
141 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
143 P: Deref<Target = P2PGossipSync<G, U, L>>,
144 R: Deref<Target = RapidGossipSync<G, L>>,
145 G: Deref<Target = NetworkGraph<L>>,
149 where U::Target: UtxoLookup, L::Target: Logger {
150 /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
152 /// Rapid gossip sync from a trusted server.
159 P: Deref<Target = P2PGossipSync<G, U, L>>,
160 R: Deref<Target = RapidGossipSync<G, L>>,
161 G: Deref<Target = NetworkGraph<L>>,
164 > GossipSync<P, R, G, U, L>
165 where U::Target: UtxoLookup, L::Target: Logger {
166 fn network_graph(&self) -> Option<&G> {
168 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
169 GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
170 GossipSync::None => None,
174 fn prunable_network_graph(&self) -> Option<&G> {
176 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
177 GossipSync::Rapid(gossip_sync) => {
178 if gossip_sync.is_initial_sync_complete() {
179 Some(gossip_sync.network_graph())
184 GossipSync::None => None,
189 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
190 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
191 GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
193 U::Target: UtxoLookup,
196 /// Initializes a new [`GossipSync::P2P`] variant.
197 pub fn p2p(gossip_sync: P) -> Self {
198 GossipSync::P2P(gossip_sync)
202 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
203 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
205 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
208 &'a (dyn UtxoLookup + Send + Sync),
214 /// Initializes a new [`GossipSync::Rapid`] variant.
215 pub fn rapid(gossip_sync: R) -> Self {
216 GossipSync::Rapid(gossip_sync)
220 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
223 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
224 &RapidGossipSync<&'a NetworkGraph<L>, L>,
226 &'a (dyn UtxoLookup + Send + Sync),
232 /// Initializes a new [`GossipSync::None`] variant.
233 pub fn none() -> Self {
238 fn handle_network_graph_update<L: Deref>(
239 network_graph: &NetworkGraph<L>, event: &Event
240 ) where L::Target: Logger {
241 if let Event::PaymentPathFailed {
242 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
244 network_graph.handle_network_update(upd);
/// Updates scorer based on event and returns whether an update occurred so we can decide whether
/// to persist.
250 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
251 scorer: &'a S, event: &Event, duration_since_epoch: Duration,
254 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
255 let mut score = scorer.write_lock();
256 score.payment_path_failed(path, *scid, duration_since_epoch);
258 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
259 // Reached if the destination explicitly failed it back. We treat this as a successful probe
260 // because the payment made it all the way to the destination with sufficient liquidity.
261 let mut score = scorer.write_lock();
262 score.probe_successful(path, duration_since_epoch);
264 Event::PaymentPathSuccessful { path, .. } => {
265 let mut score = scorer.write_lock();
266 score.payment_path_successful(path, duration_since_epoch);
268 Event::ProbeSuccessful { path, .. } => {
269 let mut score = scorer.write_lock();
270 score.probe_successful(path, duration_since_epoch);
272 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
273 let mut score = scorer.write_lock();
274 score.probe_failed(path, *scid, duration_since_epoch);
281 macro_rules! define_run_body {
283 $persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
284 $channel_manager: ident, $process_channel_manager_events: expr,
285 $onion_messenger: ident, $process_onion_message_handler_events: expr,
286 $peer_manager: ident, $gossip_sync: ident,
287 $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
288 $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
290 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
291 $channel_manager.get_cm().timer_tick_occurred();
292 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
293 $chain_monitor.rebroadcast_pending_claims();
295 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
296 let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
297 let mut last_ping_call = $get_timer(PING_TIMER);
298 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
299 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
300 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
301 let mut have_pruned = false;
302 let mut have_decayed_scorer = false;
305 $process_channel_manager_events;
306 $process_chain_monitor_events;
307 $process_onion_message_handler_events;
309 // Note that the PeerManager::process_events may block on ChannelManager's locks,
310 // hence it comes last here. When the ChannelManager finishes whatever it's doing,
311 // we want to ensure we get into `persist_manager` as quickly as we can, especially
312 // without running the normal event processing above and handing events to users.
314 // Specifically, on an *extremely* slow machine, we may see ChannelManager start
315 // processing a message effectively at any point during this loop. In order to
316 // minimize the time between such processing completing and persisting the updated
317 // ChannelManager, we want to minimize methods blocking on a ChannelManager
318 // generally, and as a fallback place such blocking only immediately before
320 $peer_manager.as_ref().process_events();
322 // Exit the loop if the background processor was requested to stop.
323 if $loop_exit_check {
324 log_trace!($logger, "Terminating background processor.");
328 // We wait up to 100ms, but track how long it takes to detect being put to sleep,
329 // see `await_start`'s use below.
330 let mut await_start = None;
331 if $check_slow_await { await_start = Some($get_timer(1)); }
333 let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
335 // Exit the loop if the background processor was requested to stop.
336 if $loop_exit_check {
337 log_trace!($logger, "Terminating background processor.");
341 if $channel_manager.get_cm().get_and_clear_needs_persistence() {
342 log_trace!($logger, "Persisting ChannelManager...");
343 $persister.persist_manager(&$channel_manager)?;
344 log_trace!($logger, "Done persisting ChannelManager.");
346 if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
347 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
348 $channel_manager.get_cm().timer_tick_occurred();
349 last_freshness_call = $get_timer(FRESHNESS_TIMER);
351 if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
352 if let Some(om) = &$onion_messenger {
353 log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
354 om.get_om().timer_tick_occurred();
356 last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
359 // On various platforms, we may be starved of CPU cycles for several reasons.
360 // E.g. on iOS, if we've been in the background, we will be entirely paused.
361 // Similarly, if we're on a desktop platform and the device has been asleep, we
362 // may not get any cycles.
363 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
364 // full second, at which point we assume sockets may have been killed (they
365 // appear to be at least on some platforms, even if it has only been a second).
366 // Note that we have to take care to not get here just because user event
367 // processing was slow at the top of the loop. For example, the sample client
368 // may call Bitcoin Core RPCs during event handling, which very often takes
// more than a handful of seconds to complete, and shouldn't disconnect all our
// peers.
371 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
372 $peer_manager.as_ref().disconnect_all_peers();
373 last_ping_call = $get_timer(PING_TIMER);
374 } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
375 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
376 $peer_manager.as_ref().timer_tick_occurred();
377 last_ping_call = $get_timer(PING_TIMER);
380 // Note that we want to run a graph prune once not long after startup before
381 // falling back to our usual hourly prunes. This avoids short-lived clients never
382 // pruning their network graph. We run once 60 seconds after startup before
383 // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
384 // we prune after an initial sync completes.
385 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
386 let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
387 let should_prune = match $gossip_sync {
388 GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
389 _ => prune_timer_elapsed,
392 // The network graph must not be pruned while rapid sync completion is pending
393 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
394 if let Some(duration_since_epoch) = $time_fetch() {
395 log_trace!($logger, "Pruning and persisting network graph.");
396 network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
398 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
399 log_trace!($logger, "Persisting network graph.");
402 if let Err(e) = $persister.persist_graph(network_graph) {
403 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
408 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
409 last_prune_call = $get_timer(prune_timer);
412 if !have_decayed_scorer {
413 if let Some(ref scorer) = $scorer {
414 if let Some(duration_since_epoch) = $time_fetch() {
415 log_trace!($logger, "Calling time_passed on scorer at startup");
416 scorer.write_lock().time_passed(duration_since_epoch);
419 have_decayed_scorer = true;
422 if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
423 if let Some(ref scorer) = $scorer {
424 if let Some(duration_since_epoch) = $time_fetch() {
425 log_trace!($logger, "Calling time_passed and persisting scorer");
426 scorer.write_lock().time_passed(duration_since_epoch);
428 log_trace!($logger, "Persisting scorer");
430 if let Err(e) = $persister.persist_scorer(&scorer) {
431 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
434 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
437 if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
438 log_trace!($logger, "Rebroadcasting monitor's pending claims");
439 $chain_monitor.rebroadcast_pending_claims();
440 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
444 // After we exit, ensure we persist the ChannelManager one final time - this avoids
445 // some races where users quit while channel updates were in-flight, with
446 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
447 $persister.persist_manager(&$channel_manager)?;
449 // Persist Scorer on exit
450 if let Some(ref scorer) = $scorer {
451 $persister.persist_scorer(&scorer)?;
454 // Persist NetworkGraph on exit
455 if let Some(network_graph) = $gossip_sync.network_graph() {
456 $persister.persist_graph(network_graph)?;
463 #[cfg(feature = "futures")]
464 pub(crate) mod futures_util {
465 use core::future::Future;
466 use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
468 use core::marker::Unpin;
469 pub(crate) struct Selector<
470 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
476 pub(crate) enum SelectorOutput {
481 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
482 > Future for Selector<A, B, C> {
483 type Output = SelectorOutput;
484 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
485 match Pin::new(&mut self.a).poll(ctx) {
486 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
489 match Pin::new(&mut self.b).poll(ctx) {
490 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
493 match Pin::new(&mut self.c).poll(ctx) {
494 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
501 // If we want to poll a future without an async context to figure out if it has completed or
502 // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values
503 // but sadly there's a good bit of boilerplate here.
504 fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
505 fn dummy_waker_action(_: *const ()) { }
507 const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
508 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
509 pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
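// An illustrative sketch (comments only) of how `dummy_waker` is used in `process_events_async`
// below: build a `core::task::Context` from the dummy `Waker` and poll the sleep future exactly
// once to check whether it has already completed, without ever awaiting it. Here `fut` stands for
// any `Unpin` future handed to us by the caller:
//
//     let waker = dummy_waker();
//     let mut ctx = core::task::Context::from_waker(&waker);
//     let already_done = match core::pin::Pin::new(&mut fut).poll(&mut ctx) {
//         core::task::Poll::Ready(_) => true,
//         core::task::Poll::Pending => false,
//     };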
511 #[cfg(feature = "futures")]
512 use futures_util::{Selector, SelectorOutput, dummy_waker};
513 #[cfg(feature = "futures")]
516 /// Processes background events in a future.
518 /// `sleeper` should return a future which completes in the given amount of time and returns a
519 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
520 /// future which outputs `true`, the loop will exit and this function's future will complete.
521 /// The `sleeper` future is free to return early after it has triggered the exit condition.
523 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
/// Requires the `futures` feature. Note that while this method is available without the `std`
/// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
/// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] manually on a
/// regular basis instead.
530 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
531 /// mobile device, where we may need to check for interruption of the application regularly. If you
532 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
533 /// are hundreds or thousands of simultaneous process calls running.
/// The `fetch_time` parameter should return the current wall clock time, if one is available. If
/// no time is available, some features may be disabled; however, the node will still operate fine.
538 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
539 /// could setup `process_events_async` like this:
541 /// # use lightning::io;
542 /// # use std::sync::{Arc, RwLock};
543 /// # use std::sync::atomic::{AtomicBool, Ordering};
544 /// # use std::time::SystemTime;
545 /// # use lightning_background_processor::{process_events_async, GossipSync};
546 /// # struct Logger {}
547 /// # impl lightning::util::logger::Logger for Logger {
548 /// # fn log(&self, _record: lightning::util::logger::Record) {}
550 /// # struct Store {}
551 /// # impl lightning::util::persist::KVStore for Store {
552 /// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
553 /// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
554 /// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
555 /// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
557 /// # struct EventHandler {}
558 /// # impl EventHandler {
559 /// # async fn handle_event(&self, _: lightning::events::Event) {}
561 /// # #[derive(Eq, PartialEq, Clone, Hash)]
562 /// # struct SocketDescriptor {}
563 /// # impl lightning::ln::peer_handler::SocketDescriptor for SocketDescriptor {
564 /// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
565 /// # fn disconnect_socket(&mut self) {}
567 /// # type ChainMonitor<B, F, FE> = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<F>, Arc<B>, Arc<FE>, Arc<Logger>, Arc<Store>>;
568 /// # type NetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<Logger>>;
569 /// # type P2PGossipSync<UL> = lightning::routing::gossip::P2PGossipSync<Arc<NetworkGraph>, Arc<UL>, Arc<Logger>>;
570 /// # type ChannelManager<B, F, FE> = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor<B, F, FE>, B, FE, Logger>;
571 /// # type OnionMessenger<B, F, FE> = lightning::onion_message::messenger::OnionMessenger<Arc<lightning::sign::KeysManager>, Arc<lightning::sign::KeysManager>, Arc<Logger>, Arc<ChannelManager<B, F, FE>>, Arc<lightning::onion_message::messenger::DefaultMessageRouter<Arc<NetworkGraph>, Arc<Logger>, Arc<lightning::sign::KeysManager>>>, Arc<ChannelManager<B, F, FE>>, lightning::ln::peer_handler::IgnoringMessageHandler>;
572 /// # type Scorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<NetworkGraph>, Arc<Logger>>>;
573 /// # type PeerManager<B, F, FE, UL> = lightning::ln::peer_handler::SimpleArcPeerManager<SocketDescriptor, ChainMonitor<B, F, FE>, B, FE, Arc<UL>, Logger>;
576 /// # B: lightning::chain::chaininterface::BroadcasterInterface + Send + Sync + 'static,
577 /// # F: lightning::chain::Filter + Send + Sync + 'static,
578 /// # FE: lightning::chain::chaininterface::FeeEstimator + Send + Sync + 'static,
579 /// # UL: lightning::routing::utxo::UtxoLookup + Send + Sync + 'static,
581 /// # peer_manager: Arc<PeerManager<B, F, FE, UL>>,
582 /// # event_handler: Arc<EventHandler>,
583 /// # channel_manager: Arc<ChannelManager<B, F, FE>>,
584 /// # onion_messenger: Arc<OnionMessenger<B, F, FE>>,
585 /// # chain_monitor: Arc<ChainMonitor<B, F, FE>>,
586 /// # gossip_sync: Arc<P2PGossipSync<UL>>,
587 /// # persister: Arc<Store>,
588 /// # logger: Arc<Logger>,
589 /// # scorer: Arc<Scorer>,
592 /// # async fn setup_background_processing<
593 /// # B: lightning::chain::chaininterface::BroadcasterInterface + Send + Sync + 'static,
594 /// # F: lightning::chain::Filter + Send + Sync + 'static,
595 /// # FE: lightning::chain::chaininterface::FeeEstimator + Send + Sync + 'static,
596 /// # UL: lightning::routing::utxo::UtxoLookup + Send + Sync + 'static,
597 /// # >(node: Node<B, F, FE, UL>) {
598 /// let background_persister = Arc::clone(&node.persister);
599 /// let background_event_handler = Arc::clone(&node.event_handler);
600 /// let background_chain_mon = Arc::clone(&node.chain_monitor);
601 /// let background_chan_man = Arc::clone(&node.channel_manager);
602 /// let background_gossip_sync = GossipSync::p2p(Arc::clone(&node.gossip_sync));
603 /// let background_peer_man = Arc::clone(&node.peer_manager);
604 /// let background_onion_messenger = Arc::clone(&node.onion_messenger);
605 /// let background_logger = Arc::clone(&node.logger);
606 /// let background_scorer = Arc::clone(&node.scorer);
608 /// // Setup the sleeper.
609 /// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
611 /// let sleeper = move |d| {
612 /// let mut receiver = stop_receiver.clone();
613 /// Box::pin(async move {
615 /// _ = tokio::time::sleep(d) => false,
616 /// _ = receiver.changed() => true,
621 /// let mobile_interruptable_platform = false;
623 /// let handle = tokio::spawn(async move {
624 /// process_events_async(
625 /// background_persister,
626 /// |e| background_event_handler.handle_event(e),
627 /// background_chain_mon,
628 /// background_chan_man,
629 /// Some(background_onion_messenger),
630 /// background_gossip_sync,
631 /// background_peer_man,
632 /// background_logger,
633 /// Some(background_scorer),
635 /// mobile_interruptable_platform,
636 /// || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
639 /// .expect("Failed to process events");
642 /// // Stop the background processing.
643 /// stop_sender.send(()).unwrap();
644 /// handle.await.unwrap();
647 #[cfg(feature = "futures")]
648 pub async fn process_events_async<
650 UL: 'static + Deref + Send + Sync,
651 CF: 'static + Deref + Send + Sync,
652 T: 'static + Deref + Send + Sync,
653 F: 'static + Deref + Send + Sync,
654 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
655 L: 'static + Deref + Send + Sync,
656 P: 'static + Deref + Send + Sync,
657 EventHandlerFuture: core::future::Future<Output = ()>,
658 EventHandler: Fn(Event) -> EventHandlerFuture,
659 PS: 'static + Deref + Send,
660 M: 'static + Deref<Target = ChainMonitor<<CM::Target as AChannelManager>::Signer, CF, T, F, L, P>> + Send + Sync,
661 CM: 'static + Deref + Send + Sync,
662 OM: 'static + Deref + Send + Sync,
663 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
664 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
665 PM: 'static + Deref + Send + Sync,
666 S: 'static + Deref<Target = SC> + Send + Sync,
667 SC: for<'b> WriteableScore<'b>,
668 SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
669 Sleeper: Fn(Duration) -> SleepFuture,
670 FetchTime: Fn() -> Option<Duration>,
672 persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
673 onion_messenger: Option<OM>,
674 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
675 sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
676 ) -> Result<(), lightning::io::Error>
678 UL::Target: 'static + UtxoLookup,
679 CF::Target: 'static + chain::Filter,
680 T::Target: 'static + BroadcasterInterface,
681 F::Target: 'static + FeeEstimator,
682 L::Target: 'static + Logger,
683 P::Target: 'static + Persist<<CM::Target as AChannelManager>::Signer>,
684 PS::Target: 'static + Persister<'a, CM, L, SC>,
685 CM::Target: AChannelManager + Send + Sync,
686 OM::Target: AOnionMessenger + Send + Sync,
687 PM::Target: APeerManager + Send + Sync,
689 let mut should_break = false;
690 let async_event_handler = |event| {
691 let network_graph = gossip_sync.network_graph();
692 let event_handler = &event_handler;
693 let scorer = &scorer;
694 let logger = &logger;
695 let persister = &persister;
696 let fetch_time = &fetch_time;
697 Box::pin(async move { // We should be able to drop the Box once our MSRV is 1.68
698 if let Some(network_graph) = network_graph {
699 handle_network_graph_update(network_graph, &event)
701 if let Some(ref scorer) = scorer {
702 if let Some(duration_since_epoch) = fetch_time() {
703 if update_scorer(scorer, &event, duration_since_epoch) {
704 log_trace!(logger, "Persisting scorer after update");
705 if let Err(e) = persister.persist_scorer(&scorer) {
706 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
711 event_handler(event).await;
715 persister, chain_monitor,
716 chain_monitor.process_pending_events_async(async_event_handler).await,
717 channel_manager, channel_manager.get_cm().process_pending_events_async(async_event_handler).await,
718 onion_messenger, if let Some(om) = &onion_messenger { om.get_om().process_pending_events_async(async_event_handler).await },
719 peer_manager, gossip_sync, logger, scorer, should_break, {
721 a: channel_manager.get_cm().get_event_or_persistence_needed_future(),
722 b: chain_monitor.get_update_future(),
723 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
726 SelectorOutput::A|SelectorOutput::B => {},
727 SelectorOutput::C(exit) => {
731 }, |t| sleeper(Duration::from_secs(t)),
732 |fut: &mut SleepFuture, _| {
733 let mut waker = dummy_waker();
734 let mut ctx = task::Context::from_waker(&mut waker);
735 match core::pin::Pin::new(fut).poll(&mut ctx) {
736 task::Poll::Ready(exit) => { should_break = exit; true },
737 task::Poll::Pending => false,
739 }, mobile_interruptable_platform, fetch_time,
743 #[cfg(feature = "std")]
744 impl BackgroundProcessor {
/// Start a background thread that takes care of responsibilities enumerated in the [top-level
/// documentation].
748 /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
749 /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
750 /// either [`join`] or [`stop`].
752 /// # Data Persistence
754 /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
755 /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
756 /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
757 /// provided implementation.
759 /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
760 /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
761 /// See the `lightning-persister` crate for LDK's provided implementation.
763 /// Typically, users should either implement [`Persister::persist_manager`] to never return an
764 /// error or call [`join`] and handle any error that may arise. For the latter case,
765 /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
769 /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
770 /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
771 /// functionality implemented by other handlers.
772 /// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
774 /// # Rapid Gossip Sync
776 /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
777 /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
778 /// until the [`RapidGossipSync`] instance completes its first sync.
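///
/// # Example
///
/// A minimal sketch (not compiled as a doctest), assuming the node's components (`persister`,
/// `event_handler`, `chain_monitor`, `channel_manager`, `onion_messenger`, `gossip_sync`,
/// `peer_manager`, `logger`, and `scorer`) have already been constructed with compatible types:
///
/// ```ignore
/// let background_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager, Some(onion_messenger),
///     gossip_sync, peer_manager, logger, Some(scorer),
/// );
/// // ... run the node ...
/// // On shutdown, stop the background thread and surface any persistence error.
/// background_processor.stop().unwrap();
/// ```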
780 /// [top-level documentation]: BackgroundProcessor
781 /// [`join`]: Self::join
782 /// [`stop`]: Self::stop
783 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
784 /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
785 /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
786 /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
787 /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
788 /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
791 UL: 'static + Deref + Send + Sync,
792 CF: 'static + Deref + Send + Sync,
793 T: 'static + Deref + Send + Sync,
794 F: 'static + Deref + Send + Sync,
795 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
796 L: 'static + Deref + Send + Sync,
797 P: 'static + Deref + Send + Sync,
798 EH: 'static + EventHandler + Send,
799 PS: 'static + Deref + Send,
800 M: 'static + Deref<Target = ChainMonitor<<CM::Target as AChannelManager>::Signer, CF, T, F, L, P>> + Send + Sync,
801 CM: 'static + Deref + Send + Sync,
802 OM: 'static + Deref + Send + Sync,
803 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
804 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
805 PM: 'static + Deref + Send + Sync,
806 S: 'static + Deref<Target = SC> + Send + Sync,
807 SC: for <'b> WriteableScore<'b>,
809 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
810 onion_messenger: Option<OM>,
811 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
814 UL::Target: 'static + UtxoLookup,
815 CF::Target: 'static + chain::Filter,
816 T::Target: 'static + BroadcasterInterface,
817 F::Target: 'static + FeeEstimator,
818 L::Target: 'static + Logger,
819 P::Target: 'static + Persist<<CM::Target as AChannelManager>::Signer>,
820 PS::Target: 'static + Persister<'a, CM, L, SC>,
821 CM::Target: AChannelManager + Send + Sync,
822 OM::Target: AOnionMessenger + Send + Sync,
823 PM::Target: APeerManager + Send + Sync,
825 let stop_thread = Arc::new(AtomicBool::new(false));
826 let stop_thread_clone = stop_thread.clone();
827 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
828 let event_handler = |event| {
829 let network_graph = gossip_sync.network_graph();
830 if let Some(network_graph) = network_graph {
831 handle_network_graph_update(network_graph, &event)
833 if let Some(ref scorer) = scorer {
834 use std::time::SystemTime;
835 let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
836 .expect("Time should be sometime after 1970");
837 if update_scorer(scorer, &event, duration_since_epoch) {
838 log_trace!(logger, "Persisting scorer after update");
839 if let Err(e) = persister.persist_scorer(&scorer) {
840 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
844 event_handler.handle_event(event);
847 persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
848 channel_manager, channel_manager.get_cm().process_pending_events(&event_handler),
849 onion_messenger, if let Some(om) = &onion_messenger { om.get_om().process_pending_events(&event_handler) },
850 peer_manager, gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
851 { Sleeper::from_two_futures(
852 &channel_manager.get_cm().get_event_or_persistence_needed_future(),
853 &chain_monitor.get_update_future()
854 ).wait_timeout(Duration::from_millis(100)); },
855 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
857 use std::time::SystemTime;
858 Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
859 .expect("Time should be sometime after 1970"))
863 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
866 /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
867 /// [`ChannelManager`].
/// This function panics if the background thread has panicked such as while persisting or
/// handling events.
874 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
875 pub fn join(mut self) -> Result<(), std::io::Error> {
876 assert!(self.thread_handle.is_some());
880 /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
881 /// [`ChannelManager`].
/// This function panics if the background thread has panicked such as while persisting or
/// handling events.
888 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
889 pub fn stop(mut self) -> Result<(), std::io::Error> {
890 assert!(self.thread_handle.is_some());
891 self.stop_and_join_thread()
894 fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
895 self.stop_thread.store(true, Ordering::Release);
899 fn join_thread(&mut self) -> Result<(), std::io::Error> {
900 match self.thread_handle.take() {
901 Some(handle) => handle.join().unwrap(),
907 #[cfg(feature = "std")]
908 impl Drop for BackgroundProcessor {
910 self.stop_and_join_thread().unwrap();
914 #[cfg(all(feature = "std", test))]
916 use bitcoin::{ScriptBuf, Txid};
917 use bitcoin::blockdata::constants::{genesis_block, ChainHash};
918 use bitcoin::blockdata::locktime::absolute::LockTime;
919 use bitcoin::blockdata::transaction::{Transaction, TxOut};
920 use bitcoin::hashes::Hash;
921 use bitcoin::network::constants::Network;
922 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
923 use lightning::chain::{BestBlock, Confirm, chainmonitor, Filter};
924 use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
925 use lightning::sign::{InMemorySigner, KeysManager, ChangeDestinationSource};
926 use lightning::chain::transaction::OutPoint;
927 use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
928 use lightning::{get_event_msg, get_event};
929 use lightning::ln::types::{PaymentHash, ChannelId};
930 use lightning::ln::channelmanager;
931 use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
932 use lightning::ln::features::{ChannelFeatures, NodeFeatures};
933 use lightning::ln::functional_test_utils::*;
934 use lightning::ln::msgs::{ChannelMessageHandler, Init};
935 use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
936 use lightning::onion_message::messenger::{DefaultMessageRouter, OnionMessenger};
937 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
938 use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
939 use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
940 use lightning::util::config::UserConfig;
941 use lightning::util::ser::Writeable;
942 use lightning::util::test_utils;
943 use lightning::util::persist::{KVStore,
944 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
945 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
946 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
947 use lightning::util::sweep::{OutputSweeper, OutputSpendStatus};
948 use lightning_persister::fs_store::FilesystemStore;
949 use std::collections::VecDeque;
951 use std::path::PathBuf;
952 use std::sync::{Arc, Mutex};
953 use std::sync::mpsc::SyncSender;
954 use std::time::Duration;
955 use lightning_rapid_gossip_sync::RapidGossipSync;
956 use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
958 const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
960 #[derive(Clone, Hash, PartialEq, Eq)]
961 struct TestDescriptor{}
962 impl SocketDescriptor for TestDescriptor {
963 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
967 fn disconnect_socket(&mut self) {}
971 type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
972 #[cfg(not(c_bindings))]
973 type LockingWrapper<T> = Mutex<T>;
975 type ChannelManager =
976 channelmanager::ChannelManager<
978 Arc<test_utils::TestBroadcaster>,
982 Arc<test_utils::TestFeeEstimator>,
984 Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
985 Arc<test_utils::TestLogger>,
987 Arc<LockingWrapper<TestScorer>>,
991 Arc<test_utils::TestLogger>>;
993 type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
995 type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
996 type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
998 type OM = OnionMessenger<Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestLogger>, Arc<ChannelManager>, Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<KeysManager>>>, IgnoringMessageHandler, IgnoringMessageHandler>;
1001 node: Arc<ChannelManager>,
1003 p2p_gossip_sync: PGS,
1004 rapid_gossip_sync: RGS,
1005 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<OM>, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
1006 chain_monitor: Arc<ChainMonitor>,
1007 kv_store: Arc<FilesystemStore>,
1008 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
1009 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
1010 logger: Arc<test_utils::TestLogger>,
1011 best_block: BestBlock,
1012 scorer: Arc<LockingWrapper<TestScorer>>,
1013 sweeper: Arc<OutputSweeper<Arc<test_utils::TestBroadcaster>, Arc<TestWallet>,
1014 Arc<test_utils::TestFeeEstimator>, Arc<dyn Filter + Sync + Send>, Arc<FilesystemStore>,
1015 Arc<test_utils::TestLogger>, Arc<KeysManager>>>,
1019 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1020 GossipSync::P2P(self.p2p_gossip_sync.clone())
1023 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1024 GossipSync::Rapid(self.rapid_gossip_sync.clone())
1027 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1032 impl Drop for Node {
1033 fn drop(&mut self) {
1034 let data_dir = self.kv_store.get_data_dir();
1035 match fs::remove_dir_all(data_dir.clone()) {
1036 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
1043 graph_error: Option<(std::io::ErrorKind, &'static str)>,
1044 graph_persistence_notifier: Option<SyncSender<()>>,
1045 manager_error: Option<(std::io::ErrorKind, &'static str)>,
1046 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
1047 kv_store: FilesystemStore,
1051 fn new(data_dir: PathBuf) -> Self {
1052 let kv_store = FilesystemStore::new(data_dir);
1053 Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
1056 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1057 Self { graph_error: Some((error, message)), ..self }
1060 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
1061 Self { graph_persistence_notifier: Some(sender), ..self }
1064 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1065 Self { manager_error: Some((error, message)), ..self }
1068 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1069 Self { scorer_error: Some((error, message)), ..self }
1073 impl KVStore for Persister {
1074 fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
1075 self.kv_store.read(primary_namespace, secondary_namespace, key)
1078 fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
1079 if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
1080 secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
1081 key == CHANNEL_MANAGER_PERSISTENCE_KEY
1083 if let Some((error, message)) = self.manager_error {
1084 return Err(std::io::Error::new(error, message))
1088 if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
1089 secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
1090 key == NETWORK_GRAPH_PERSISTENCE_KEY
1092 if let Some(sender) = &self.graph_persistence_notifier {
1093 match sender.send(()) {
1095 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1099 if let Some((error, message)) = self.graph_error {
1100 return Err(std::io::Error::new(error, message))
1104 if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
1105 secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
1106 key == SCORER_PERSISTENCE_KEY
1108 if let Some((error, message)) = self.scorer_error {
1109 return Err(std::io::Error::new(error, message))
1113 self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
1116 fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1117 self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
1120 fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
1121 self.kv_store.list(primary_namespace, secondary_namespace)
1126 event_expectations: Option<VecDeque<TestResult>>,
1131 PaymentFailure { path: Path, short_channel_id: u64 },
1132 PaymentSuccess { path: Path },
1133 ProbeFailure { path: Path },
1134 ProbeSuccess { path: Path },
1139 Self { event_expectations: None }
1142 fn expect(&mut self, expectation: TestResult) {
1143 self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1147 impl lightning::util::ser::Writeable for TestScorer {
1148 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1151 impl ScoreLookUp for TestScorer {
1152 type ScoreParams = ();
1153 fn channel_penalty_msat(
1154 &self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
1155 ) -> u64 { unimplemented!(); }
1158 impl ScoreUpdate for TestScorer {
1159 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
1160 if let Some(expectations) = &mut self.event_expectations {
1161 match expectations.pop_front().unwrap() {
1162 TestResult::PaymentFailure { path, short_channel_id } => {
1163 assert_eq!(actual_path, &path);
1164 assert_eq!(actual_short_channel_id, short_channel_id);
1166 TestResult::PaymentSuccess { path } => {
1167 panic!("Unexpected successful payment path: {:?}", path)
1169 TestResult::ProbeFailure { path } => {
1170 panic!("Unexpected probe failure: {:?}", path)
1172 TestResult::ProbeSuccess { path } => {
1173 panic!("Unexpected probe success: {:?}", path)
1179 fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
1180 if let Some(expectations) = &mut self.event_expectations {
1181 match expectations.pop_front().unwrap() {
1182 TestResult::PaymentFailure { path, .. } => {
1183 panic!("Unexpected payment path failure: {:?}", path)
1185 TestResult::PaymentSuccess { path } => {
1186 assert_eq!(actual_path, &path);
1188 TestResult::ProbeFailure { path } => {
1189 panic!("Unexpected probe failure: {:?}", path)
1191 TestResult::ProbeSuccess { path } => {
1192 panic!("Unexpected probe success: {:?}", path)
1198 fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
1199 if let Some(expectations) = &mut self.event_expectations {
1200 match expectations.pop_front().unwrap() {
1201 TestResult::PaymentFailure { path, .. } => {
1202 panic!("Unexpected payment path failure: {:?}", path)
1204 TestResult::PaymentSuccess { path } => {
1205 panic!("Unexpected payment path success: {:?}", path)
1207 TestResult::ProbeFailure { path } => {
1208 assert_eq!(actual_path, &path);
1210 TestResult::ProbeSuccess { path } => {
1211 panic!("Unexpected probe success: {:?}", path)
1216 fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
1217 if let Some(expectations) = &mut self.event_expectations {
1218 match expectations.pop_front().unwrap() {
1219 TestResult::PaymentFailure { path, .. } => {
1220 panic!("Unexpected payment path failure: {:?}", path)
1222 TestResult::PaymentSuccess { path } => {
1223 panic!("Unexpected payment path success: {:?}", path)
1225 TestResult::ProbeFailure { path } => {
1226 panic!("Unexpected probe failure: {:?}", path)
1228 TestResult::ProbeSuccess { path } => {
1229 assert_eq!(actual_path, &path);
1234 fn time_passed(&mut self, _: Duration) {}
1238 impl lightning::routing::scoring::Score for TestScorer {}
1240 impl Drop for TestScorer {
1241 fn drop(&mut self) {
1242 if std::thread::panicking() {
1246 if let Some(event_expectations) = &self.event_expectations {
1247 if !event_expectations.is_empty() {
1248 panic!("Unsatisfied event expectations: {:?}", event_expectations);
1254 struct TestWallet {}
1256 impl ChangeDestinationSource for TestWallet {
1257 fn get_change_destination_script(&self) -> Result<ScriptBuf, ()> {
1258 Ok(ScriptBuf::new())
1262 fn get_full_filepath(filepath: String, filename: String) -> String {
1263 let mut path = PathBuf::from(filepath);
1264 path.push(filename);
1265 path.to_str().unwrap().to_string()
1268 fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
1269 let persist_temp_path = env::temp_dir().join(persist_dir);
1270 let persist_dir = persist_temp_path.to_string_lossy().to_string();
1271 let network = Network::Bitcoin;
1272 let mut nodes = Vec::new();
1273 for i in 0..num_nodes {
1274 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1275 let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1276 let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1277 let genesis_block = genesis_block(network);
1278 let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1279 let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
1280 let now = Duration::from_secs(genesis_block.header.time as u64);
1281 let seed = [i as u8; 32];
1282 let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1283 let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), Arc::clone(&keys_manager), scorer.clone(), Default::default()));
1284 let msg_router = Arc::new(DefaultMessageRouter::new(network_graph.clone(), Arc::clone(&keys_manager)));
1285 let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
1286 let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
1287 let now = Duration::from_secs(genesis_block.header.time as u64);
1288 let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1289 let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
1290 let best_block = BestBlock::from_network(network);
1291 let params = ChainParameters { network, best_block };
1292 let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
1293 let messenger = Arc::new(OnionMessenger::new(keys_manager.clone(), keys_manager.clone(), logger.clone(), manager.clone(), msg_router.clone(), IgnoringMessageHandler {}, IgnoringMessageHandler {}));
1294 let wallet = Arc::new(TestWallet {});
1295 let sweeper = Arc::new(OutputSweeper::new(best_block, Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator),
1296 None::<Arc<dyn Filter + Sync + Send>>, Arc::clone(&keys_manager), wallet, Arc::clone(&kv_store), Arc::clone(&logger)));
1297 let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1298 let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1299 let msg_handler = MessageHandler {
1300 chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
1301 route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
1302 onion_message_handler: messenger.clone(), custom_message_handler: IgnoringMessageHandler{}
1304 let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
1305 let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer, sweeper, messenger };
1309 for i in 0..num_nodes {
1310 for j in (i+1)..num_nodes {
1311 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
1312 features: nodes[j].node.init_features(), networks: None, remote_network_address: None
1314 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
1315 features: nodes[i].node.init_features(), networks: None, remote_network_address: None
1320 (persist_dir, nodes)
1323 macro_rules! open_channel {
1324 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1325 begin_open_channel!($node_a, $node_b, $channel_value);
1326 let events = $node_a.node.get_and_clear_pending_events();
1327 assert_eq!(events.len(), 1);
1328 let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1329 $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1330 $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1331 get_event!($node_b, Event::ChannelPending);
1332 $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1333 get_event!($node_a, Event::ChannelPending);
1338 macro_rules! begin_open_channel {
1339 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1340 $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
1341 $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1342 $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1346 macro_rules! handle_funding_generation_ready {
1347 ($event: expr, $channel_value: expr) => {{
1349 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1350 assert_eq!(channel_value_satoshis, $channel_value);
1351 assert_eq!(user_channel_id, 42);
1353 let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
1354 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1356 (temporary_channel_id, tx)
1358 _ => panic!("Unexpected event"),
1363 fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1364 for i in 1..=depth {
1365 let prev_blockhash = node.best_block.block_hash;
1366 let height = node.best_block.height + 1;
1367 let header = create_dummy_header(prev_blockhash, height);
1368 let txdata = vec![(0, tx)];
1369 node.best_block = BestBlock::new(header.block_hash(), height);
1372 node.node.transactions_confirmed(&header, &txdata, height);
1373 node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1374 node.sweeper.transactions_confirmed(&header, &txdata, height);
1376 x if x == depth => {
1377 // We need the TestBroadcaster to know about the new height so that it doesn't think
// we're violating the time lock requirements of transactions broadcasted at that
// height.
1380 node.tx_broadcaster.blocks.lock().unwrap().push((genesis_block(Network::Bitcoin), height));
1381 node.node.best_block_updated(&header, height);
1382 node.chain_monitor.best_block_updated(&header, height);
1383 node.sweeper.best_block_updated(&header, height);
fn advance_chain(node: &mut Node, num_blocks: u32) {
	for i in 1..=num_blocks {
		let prev_blockhash = node.best_block.block_hash;
		let height = node.best_block.height + 1;
		let header = create_dummy_header(prev_blockhash, height);
		node.best_block = BestBlock::new(header.block_hash(), height);
		if i == num_blocks {
			// We need the TestBroadcaster to know about the new height so that it doesn't think
			// we're violating the time lock requirements of transactions broadcasted at that
			// height.
			node.tx_broadcaster.blocks.lock().unwrap().push((genesis_block(Network::Bitcoin), height));
			node.node.best_block_updated(&header, height);
			node.chain_monitor.best_block_updated(&header, height);
			node.sweeper.best_block_updated(&header, height);
		}
	}
}
fn confirm_transaction(node: &mut Node, tx: &Transaction) {
	confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
}
#[test]
fn test_background_processor() {
	// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
	// updates. Also test that when new updates are available, the manager signals that it needs
	// re-persistence and is successfully re-persisted.
	let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

	// Go through the channel creation process so that each node has something to persist. Since
	// open_channel consumes events, it must complete before starting BackgroundProcessor to
	// avoid a race with processing events.
	let tx = open_channel!(nodes[0], nodes[1], 100000);

	// Initiate the background processors to watch each node.
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
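
	// `check_persisted_data!` re-serializes the given object and busy-waits until the bytes the
	// background processor wrote to `$filepath` match, since persistence happens on the
	// background thread rather than in this test thread.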
	macro_rules! check_persisted_data {
		($node: expr, $filepath: expr) => {
			let mut expected_bytes = Vec::new();
			loop {
				expected_bytes.clear();
				match $node.write(&mut expected_bytes) {
					Ok(()) => {
						match std::fs::read($filepath) {
							Ok(bytes) => {
								if bytes == expected_bytes {
									break
								} else {
									continue
								}
							},
							Err(_) => continue
						}
					},
					Err(e) => panic!("Unexpected error: {}", e)
				}
			}
		}
	}

	// Check that the initial channel manager data is persisted as expected.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
	check_persisted_data!(nodes[0].node, filepath.clone());

	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Force-close the channel.
	nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();

	// Check that the force-close updates are persisted.
	check_persisted_data!(nodes[0].node, filepath.clone());
	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Check network graph is persisted
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
	check_persisted_data!(nodes[0].network_graph, filepath.clone());

	// Check scorer is persisted
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
	check_persisted_data!(nodes[0].scorer, filepath.clone());

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_timer_tick_called() {
	// Test that:
	// - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
	// - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
	// - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
	// - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
	let (_, nodes) = create_nodes(1, "test_timer_tick_called");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
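
	// Poll the test logger until all four expected timer-tick log lines show up, which proves
	// each timer fired at least once while the background processor was running.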
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
		let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
		let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
		let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
		if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
			break
		}
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_channel_manager_persist_error() {
	// Test that if we encounter an error during manager persistence, the background processor
	// shuts down and the error is surfaced via `join`.
	let (_, nodes) = create_nodes(2, "test_persist_error");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.join() {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_channel_manager_persist_error_async() {
	// Test that if we encounter an error during manager persistence, the error is returned when
	// the `process_events_async` future resolves.
	let (_, nodes) = create_nodes(2, "test_persist_error_sync");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
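
	// `process_events_async` is driven by a caller-provided sleeper closure rather than its own
	// thread; the closure below simply sleeps for the requested duration and returns `false`
	// (never requesting an exit), so the future should only resolve once the injected
	// persistence error surfaces.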
	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			Box::pin(async move {
				tokio::time::sleep(dur).await;
				false // Never exit
			})
		}, false, || Some(Duration::ZERO),
	);
	match bp_future.await {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_network_graph_persist_error() {
	// Test that if we encounter an error during network graph persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting network graph"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_scorer_persist_error() {
	// Test that if we encounter an error during scorer persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting scorer"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_background_event_handling() {
	let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
	let channel_value = 100000;
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir.clone()));

	// Set up a background event handler for FundingGenerationReady and ChannelPending events.
	let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
	let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
		Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
		Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};

	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = funding_generation_recv
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
	get_event!(nodes[1], Event::ChannelPending);
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ChannelPending not handled within deadline");

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	let broadcast_funding = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	assert_eq!(broadcast_funding.txid(), funding_tx.txid());
	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
		Event::ChannelReady { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
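
	// Confirming the commitment transaction to a depth of BREAKDOWN_TIMEOUT blocks lets the
	// force-closing node's delayed claimable output mature, which should prompt a
	// SpendableOutputs event for the handler above to forward.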
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { outputs, channel_id } => {
			nodes[0].sweeper.track_spendable_outputs(outputs, channel_id, false, Some(153)).unwrap();
		},
		_ => panic!("Unexpected event: {:?}", event),
	}

	// Check we don't generate an initial sweeping tx until we reach the required height.
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	if let Some(sweep_tx_0) = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop() {
		assert!(!tracked_output.is_spent_in(&sweep_tx_0));
		match tracked_output.status {
			OutputSpendStatus::PendingInitialBroadcast { delayed_until_height } => {
				assert_eq!(delayed_until_height, Some(153));
			},
			_ => panic!("Unexpected status"),
		}
	}

	advance_chain(&mut nodes[0], 3);

	// Check we generate an initial sweeping tx.
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	let sweep_tx_0 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	match tracked_output.status {
		OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
			assert_eq!(sweep_tx_0.txid(), latest_spending_tx.txid());
		},
		_ => panic!("Unexpected status"),
	}

	// Check we regenerate and rebroadcast the sweeping tx each block.
	advance_chain(&mut nodes[0], 1);
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	let sweep_tx_1 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	match tracked_output.status {
		OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
			assert_eq!(sweep_tx_1.txid(), latest_spending_tx.txid());
		},
		_ => panic!("Unexpected status"),
	}
	assert_ne!(sweep_tx_0, sweep_tx_1);

	advance_chain(&mut nodes[0], 1);
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	let sweep_tx_2 = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	match tracked_output.status {
		OutputSpendStatus::PendingFirstConfirmation { latest_spending_tx, .. } => {
			assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
		},
		_ => panic!("Unexpected status"),
	}
	assert_ne!(sweep_tx_0, sweep_tx_2);
	assert_ne!(sweep_tx_1, sweep_tx_2);

	// Check we still track the spendable outputs up to ANTI_REORG_DELAY confirmations.
	confirm_transaction_depth(&mut nodes[0], &sweep_tx_2, 5);
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	match tracked_output.status {
		OutputSpendStatus::PendingThresholdConfirmations { latest_spending_tx, .. } => {
			assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
		},
		_ => panic!("Unexpected status"),
	}

	// Check we still see the transaction as confirmed if we unconfirm any untracked
	// transaction. (We previously had a bug that would mark tracked transactions as
	// unconfirmed if any transaction at an unknown block height was unconfirmed.)
	let unconf_txid = Txid::from_slice(&[0; 32]).unwrap();
	nodes[0].sweeper.transaction_unconfirmed(&unconf_txid);

	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 1);
	let tracked_output = nodes[0].sweeper.tracked_spendable_outputs().first().unwrap().clone();
	match tracked_output.status {
		OutputSpendStatus::PendingThresholdConfirmations { latest_spending_tx, .. } => {
			assert_eq!(sweep_tx_2.txid(), latest_spending_tx.txid());
		},
		_ => panic!("Unexpected status"),
	}

	// Check we stop tracking the spendable outputs when one of the txs reaches
	// ANTI_REORG_DELAY confirmations.
	confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
	assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_scorer_persistence() {
	let (_, nodes) = create_nodes(2, "test_scorer_persistence");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Calling time_passed and persisting scorer".to_string();
		if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
			break
		}
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
	($nodes: expr, $receive: expr, $sleep: expr) => {
		let features = ChannelFeatures::empty();
		$nodes[0].network_graph.add_channel_from_partial_announcement(
			42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
		).expect("Failed to update channel from partial announcement");
		let original_graph_description = $nodes[0].network_graph.to_string();
		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

		loop {
			$sleep;
			let log_entries = $nodes[0].logger.lines.lock().unwrap();
			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
			if *log_entries.get(&("lightning_background_processor", loop_counter))
				.unwrap_or(&0) > 1
			{
				// Wait until the loop has gone around at least twice.
				break
			}
		}
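
		// Feed in a canned rapid gossip sync payload so the graph sync is marked complete; only
		// then should the background processor allow stale channels to be pruned.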
1804 let initialization_input = vec![
1805 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1806 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1807 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1808 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1809 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1810 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1811 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1812 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1813 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1814 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1815 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1816 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
1817 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
1819 $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1821 // this should have added two channels and pruned the previous one.
1822 assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1824 $receive.expect("Network graph not pruned within deadline");
1826 // all channels should now be pruned
1827 assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let event_handler = |_: _| {};
	let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
		receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
		std::thread::sleep(Duration::from_millis(1)));

	background_processor.stop().unwrap();
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::from_secs(1696300000)),
	);

	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
			let mut i = 0;
			loop {
				tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
				if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
				assert!(i < 5);
				i += 1;
			}
		}, tokio::time::sleep(Duration::from_millis(1)).await);
		exit_sender.send(()).unwrap();
	});

	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}
macro_rules! do_test_payment_path_scoring {
	($nodes: expr, $receive: expr) => {
		// Ensure that we update the scorer when relevant events are processed. In this case, we
		// ensure that we update the scorer upon a payment path succeeding (note that the channel
		// must be public or else we won't score it).
		// A background event handler for FundingGenerationReady events must be hooked up to a
		// running background processor.
		let scored_scid = 4242;
		let secp_ctx = Secp256k1::new();
		let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

		let path = Path { hops: vec![RouteHop {
			pubkey: node_1_id,
			node_features: NodeFeatures::empty(),
			short_channel_id: scored_scid,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 0,
			cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			maybe_announced_channel: true,
		}], blinded_tail: None };
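
		// Each block below primes the test scorer with the exact callback it expects (payment
		// failure/success, probe success/failure), pushes the corresponding event into the
		// ChannelManager, and waits for the background processor to hand it to the event
		// handler, which is what should trigger the score update.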
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: false,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// Ensure we'll score payments that were explicitly failed back by the destination as
		// ProbeSuccess.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: true,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: None,
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: None,
			path: path.clone(),
		});
		let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
		match event {
			Event::PaymentPathSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
		});
		let event = $receive.expect("ProbeSuccessful not handled within deadline");
		match event {
			Event::ProbeSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeFailed {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("ProbeFailure not handled within deadline");
		match event {
			Event::ProbeFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
}
#[test]
fn test_payment_path_scoring() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
		Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}

	let log_entries = nodes[0].logger.lines.lock().unwrap();
	let expected_log = "Persisting scorer after update".to_string();
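	// The scorer should have been persisted once per scoring event driven above: two path
	// failures, one path success, one probe success, and one probe failure.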
	assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_payment_path_scoring_async() {
	let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
	let event_handler = move |event: Event| {
		let sender_ref = sender.clone();
		async move {
			match event {
				Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
				Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
				_ => panic!("Unexpected event: {:?}", event),
			}
		}
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());

	let bp_future = super::process_events_async(
		persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), Some(nodes[0].messenger.clone()),
		nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::ZERO),
	);
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_payment_path_scoring!(nodes, receiver.recv().await);
		exit_sender.send(()).unwrap();

		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer after update".to_string();
		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
	});

	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();