//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
//! running properly, and (2) either can or should be run in the background. See docs for
//! [`BackgroundProcessor`] for more details on the nitty-gritty.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]

#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]

#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#[cfg(any(test, feature = "std"))]
extern crate core;

#[cfg(not(feature = "std"))]
extern crate alloc;

#[macro_use] extern crate lightning;
extern crate lightning_rapid_gossip_sync;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
use lightning::events::{Event, PathFailure};
#[cfg(feature = "std")]
use lightning::events::{EventHandler, EventsProvider};
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::OnionMessageHandler;
use lightning::ln::peer_handler::APeerManager;
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::Router;
use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
#[cfg(feature = "std")]
use lightning::util::wakers::Sleeper;
use lightning_rapid_gossip_sync::RapidGossipSync;

use core::ops::Deref;
use core::time::Duration;
#[cfg(feature = "std")]
use std::sync::Arc;
#[cfg(feature = "std")]
use core::sync::atomic::{AtomicBool, Ordering};
#[cfg(feature = "std")]
use std::thread::{self, JoinHandle};
#[cfg(feature = "std")]
use std::time::Instant;

#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be
/// relied upon as doing so may result in high latency.
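///
/// # Example
///
/// A minimal usage sketch (an illustrative addition, not upstream documentation; all of the
/// arguments are assumed to be set up as described in [`BackgroundProcessor::start`]):
///
/// ```ignore
/// let bg_processor = BackgroundProcessor::start(
/// 	persister, event_handler, chain_monitor, channel_manager,
/// 	gossip_sync, peer_manager, logger, Some(scorer),
/// );
/// // ...
/// // On shutdown, stop the thread and surface any persistence error:
/// bg_processor.stop().unwrap();
/// ```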
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	stop_thread: Arc<AtomicBool>,
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this, but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

#[cfg(not(test))]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
#[cfg(test)]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 60;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;
#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
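// A compile-time sanity check (an illustrative addition, not in the upstream source) that
// `min_u64` agrees with `core::cmp::min`, which the const evaluation of `FASTEST_TIMER` above
// relies on.
#[cfg(feature = "futures")]
const _: () = assert!(min_u64(2, 3) == 2 && min_u64(3, 2) == 2);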
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}
impl<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
> GossipSync<P, R, G, U, L>
where U::Target: UtxoLookup, L::Target: Logger {
	fn network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::None => None,
		}
	}

	fn prunable_network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => {
				if gossip_sync.is_initial_sync_complete() {
					Some(gossip_sync.network_graph())
				} else {
					None
				}
			},
			GossipSync::None => None,
		}
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
	GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Initializes a new [`GossipSync::P2P`] variant.
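	///
	/// For instance (a sketch, not upstream documentation; `p2p_gossip_sync` is assumed to be an
	/// already-constructed `Arc<P2PGossipSync<_, _, _>>`, as in the `process_events_async` example):
	///
	/// ```ignore
	/// let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_gossip_sync));
	/// ```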
	pub fn p2p(gossip_sync: P) -> Self {
		GossipSync::P2P(gossip_sync)
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref> GossipSync<
	&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
	R,
	G,
	&'a (dyn UtxoLookup + Send + Sync),
	L,
> where L::Target: Logger {
	/// Initializes a new [`GossipSync::Rapid`] variant.
	pub fn rapid(gossip_sync: R) -> Self {
		GossipSync::Rapid(gossip_sync)
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref> GossipSync<
	&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
	&RapidGossipSync<&'a NetworkGraph<L>, L>,
	&'a NetworkGraph<L>,
	&'a (dyn UtxoLookup + Send + Sync),
	L,
> where L::Target: Logger {
	/// Initializes a new [`GossipSync::None`] variant.
	pub fn none() -> Self {
		GossipSync::None
	}
}
fn handle_network_graph_update<L: Deref>(
	network_graph: &NetworkGraph<L>, event: &Event
) where L::Target: Logger {
	if let Event::PaymentPathFailed {
		failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
	{
		network_graph.handle_network_update(upd);
	}
}
/// Updates scorer based on event and returns whether an update occurred so we can decide whether
/// to persist.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
	scorer: &'a S, event: &Event
) -> bool {
	match event {
		Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_failed(path, *scid);
		},
		Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
			// Reached if the destination explicitly failed it back. We treat this as a successful probe
			// because the payment made it all the way to the destination with sufficient liquidity.
			let mut score = scorer.write_lock();
			score.probe_successful(path);
		},
		Event::PaymentPathSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_successful(path);
		},
		Event::ProbeSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.probe_successful(path);
		},
		Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.probe_failed(path, *scid);
		},
		_ => return false,
	}
	true
}
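// How callers are expected to use the flag returned by `update_scorer` (a sketch mirroring the
// event handlers below, which persist the scorer only when it actually changed):
//
//     if update_scorer(scorer, &event) {
//         if let Err(e) = persister.persist_scorer(&scorer) {
//             log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
//         }
//     }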
macro_rules! define_run_body {
	(
		$persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
		$channel_manager: ident, $process_channel_manager_events: expr,
		$peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
		$logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
		$timer_elapsed: expr, $check_slow_await: expr
	) => { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;
			$process_onion_message_handler_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persisting.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}
			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}
			if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
				log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
				$peer_manager.onion_message_handler().timer_tick_occurred();
				last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
			}
			if await_slow {
				// On various platforms, we may be starved of CPU cycles for several reasons.
				// E.g. on iOS, if we've been in the background, we will be entirely paused.
				// Similarly, if we're on a desktop platform and the device has been asleep, we
				// may not get any cycles.
				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
				// full second, at which point we assume sockets may have been killed (they
				// appear to be at least on some platforms, even if it has only been a second).
				// Note that we have to take care to not get here just because user event
				// processing was slow at the top of the loop. For example, the sample client
				// may call Bitcoin Core RPCs during event handling, which very often takes
				// more than a handful of seconds to complete, and shouldn't disconnect all our
				// peers.
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}

			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					#[cfg(feature = "std")] {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking();
					}
					#[cfg(not(feature = "std"))] {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					log_trace!($logger, "Persisting scorer");
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}

			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable... we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
}
#[cfg(feature = "futures")]
use futures_util::{Selector, SelectorOutput, dummy_waker};
#[cfg(feature = "futures")]
use core::task;
/// Processes background events in a future.
///
/// `sleeper` should return a future which completes in the given amount of time and returns a
/// boolean indicating whether the background processing should exit. Once `sleeper` returns a
/// future which outputs `true`, the loop will exit and this function's future will complete.
/// The `sleeper` future is free to return early after it has triggered the exit condition.
///
/// See [`BackgroundProcessor::start`] for information on which actions this handles.
///
/// Requires the `futures` feature. Note that while this method is available without the `std`
/// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
/// you should call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] manually at
/// regular intervals instead.
///
/// The `mobile_interruptable_platform` flag should be set if we're currently running on a
/// mobile device, where we may need to check for interruption of the application regularly. If you
/// are unsure, you should set the flag, as its performance impact is minimal unless there
/// are hundreds or thousands of simultaneous process calls running.
///
/// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
/// could set up `process_events_async` like this:
/// ```
/// # use lightning::io;
/// # use std::sync::{Arc, RwLock};
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use lightning_background_processor::{process_events_async, GossipSync};
/// # struct MyStore {}
/// # impl lightning::util::persist::KVStore for MyStore {
/// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
/// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
/// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
/// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
/// # }
/// # struct MyEventHandler {}
/// # impl MyEventHandler {
/// #     async fn handle_event(&self, _: lightning::events::Event) {}
/// # }
/// # #[derive(Eq, PartialEq, Clone, Hash)]
/// # struct MySocketDescriptor {}
/// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
/// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
/// #     fn disconnect_socket(&mut self) {}
/// # }
/// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
/// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
/// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
/// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
/// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
/// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
/// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
/// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
/// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
/// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
/// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
/// #
/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
///	let background_persister = Arc::clone(&my_persister);
///	let background_event_handler = Arc::clone(&my_event_handler);
///	let background_chain_mon = Arc::clone(&my_chain_monitor);
///	let background_chan_man = Arc::clone(&my_channel_manager);
///	let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
///	let background_peer_man = Arc::clone(&my_peer_manager);
///	let background_logger = Arc::clone(&my_logger);
///	let background_scorer = Arc::clone(&my_scorer);
///
///	// Setup the sleeper.
///	let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
///
///	let sleeper = move |d| {
///		let mut receiver = stop_receiver.clone();
///		Box::pin(async move {
///			tokio::select!{
///				_ = tokio::time::sleep(d) => false,
///				_ = receiver.changed() => true,
///			}
///		})
///	};
///
///	let mobile_interruptable_platform = false;
///
///	let handle = tokio::spawn(async move {
///		process_events_async(
///			background_persister,
///			|e| background_event_handler.handle_event(e),
///			background_chain_mon,
///			background_chan_man,
///			background_gossip_sync,
///			background_peer_man,
///			background_logger,
///			Some(background_scorer),
///			sleeper,
///			mobile_interruptable_platform,
///		)
///		.await
///		.expect("Failed to process events");
///	});
///
///	// Stop the background processing.
///	stop_sender.send(()).unwrap();
///	handle.await.unwrap();
/// # }
/// ```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	PM: 'static + Deref + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	PM::Target: APeerManager + Send + Sync,
{
	let mut should_break = false;
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if update_scorer(scorer, &event) {
					log_trace!(logger, "Persisting scorer after update");
					if let Err(e) = persister.persist_scorer(&scorer) {
						log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
			}
			event_handler(event).await;
		}
	};
	define_run_body!(
		persister, chain_monitor,
		chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
		gossip_sync, logger, scorer, should_break, {
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A|SelectorOutput::B => {},
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform
	)
}
#[cfg(feature = "futures")]
async fn process_onion_message_handler_events_async<
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PM: 'static + Deref + Send + Sync,
>(
	peer_manager: &PM, handler: EventHandler
)
where
	PM::Target: APeerManager + Send + Sync,
{
	use lightning::events::EventsProvider;

	// `process_pending_events` takes a synchronous handler, so buffer the events here and hand
	// them to the async handler afterwards.
	let events = core::cell::RefCell::new(Vec::new());
	peer_manager.onion_message_handler().process_pending_events(&|e| events.borrow_mut().push(e));

	for event in events.into_inner() {
		handler(event).await
	}
}
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error can be
	/// retrieved by calling either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
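	///
	/// For instance (a sketch, not upstream documentation; `rapid_gossip_sync` is assumed to be
	/// an already-constructed `Arc<RapidGossipSync<_, _>>`):
	///
	/// ```ignore
	/// let gossip_sync = GossipSync::rapid(Arc::clone(&rapid_gossip_sync));
	/// // Pass `gossip_sync` as the `gossip_sync` argument to `BackgroundProcessor::start`.
	/// ```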
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		PM: 'static + Deref + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
		PM::Target: APeerManager + Send + Sync,
	{
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					if update_scorer(scorer, &event) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			define_run_body!(
				persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				peer_manager,
				peer_manager.onion_message_handler().process_pending_events(&event_handler),
				gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false
			)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}
	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked, such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}

	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked, such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}

	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}

#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		self.stop_and_join_thread().unwrap();
	}
}
#[cfg(all(feature = "std", test))]
mod tests {
	use bitcoin::blockdata::constants::{genesis_block, ChainHash};
	use bitcoin::blockdata::locktime::absolute::LockTime;
	use bitcoin::blockdata::transaction::{Transaction, TxOut};
	use bitcoin::network::constants::Network;
	use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
	use lightning::chain::{BestBlock, Confirm, chainmonitor};
	use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
	use lightning::sign::{InMemorySigner, KeysManager};
	use lightning::chain::transaction::OutPoint;
	use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
	use lightning::{get_event_msg, get_event};
	use lightning::ln::PaymentHash;
	use lightning::ln::channelmanager;
	use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
	use lightning::ln::features::{ChannelFeatures, NodeFeatures};
	use lightning::ln::functional_test_utils::*;
	use lightning::ln::msgs::{ChannelMessageHandler, Init};
	use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
	use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
	use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
	use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
	use lightning::util::config::UserConfig;
	use lightning::util::ser::Writeable;
	use lightning::util::test_utils;
	use lightning::util::persist::{KVStore,
		CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
		NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
		SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
	use lightning_persister::fs_store::FilesystemStore;
	use std::collections::VecDeque;
	use std::{env, fs};
	use std::path::PathBuf;
	use std::sync::{Arc, Mutex};
	use std::sync::mpsc::SyncSender;
	use std::time::Duration;
	use lightning_rapid_gossip_sync::RapidGossipSync;
	use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;

	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			0
		}

		fn disconnect_socket(&mut self) {}
	}

	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;

	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;

	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		kv_store: Arc<FilesystemStore>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>,
	}

	impl Node {
		fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::P2P(self.p2p_gossip_sync.clone())
		}

		fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::Rapid(self.rapid_gossip_sync.clone())
		}

		fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::None
		}
	}
	impl Drop for Node {
		fn drop(&mut self) {
			let data_dir = self.kv_store.get_data_dir();
			match fs::remove_dir_all(data_dir.clone()) {
				Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
				_ => {}
			}
		}
	}

	struct Persister {
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		graph_persistence_notifier: Option<SyncSender<()>>,
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}

	impl Persister {
		fn new(data_dir: PathBuf) -> Self {
			let kv_store = FilesystemStore::new(data_dir);
			Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
		}

		fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { graph_error: Some((error, message)), ..self }
		}

		fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
			Self { graph_persistence_notifier: Some(sender), ..self }
		}

		fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { manager_error: Some((error, message)), ..self }
		}

		fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { scorer_error: Some((error, message)), ..self }
		}
	}
	impl KVStore for Persister {
		fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
			self.kv_store.read(primary_namespace, secondary_namespace, key)
		}

		fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
			if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == CHANNEL_MANAGER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.manager_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == NETWORK_GRAPH_PERSISTENCE_KEY
			{
				if let Some(sender) = &self.graph_persistence_notifier {
					match sender.send(()) {
						Ok(()) => {},
						Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
					}
				};

				if let Some((error, message)) = self.graph_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == SCORER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.scorer_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
		}

		fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
			self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
		}

		fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
			self.kv_store.list(primary_namespace, secondary_namespace)
		}
	}
	struct TestScorer {
		event_expectations: Option<VecDeque<TestResult>>,
	}

	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}

	impl TestScorer {
		fn new() -> Self {
			Self { event_expectations: None }
		}

		fn expect(&mut self, expectation: TestResult) {
			self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
		}
	}

	impl lightning::util::ser::Writeable for TestScorer {
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}

	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		fn channel_penalty_msat(
			&self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
	impl ScoreUpdate for TestScorer {
		fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn payment_path_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn probe_failed(&mut self, actual_path: &Path, _: u64) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn probe_successful(&mut self, actual_path: &Path) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						assert_eq!(actual_path, &path);
					}
				}
			}
		}
	}

	#[cfg(c_bindings)]
	impl lightning::routing::scoring::Score for TestScorer {}
	impl Drop for TestScorer {
		fn drop(&mut self) {
			if std::thread::panicking() {
				return;
			}

			if let Some(event_expectations) = &self.event_expectations {
				if !event_expectations.is_empty() {
					panic!("Unsatisfied event expectations: {:?}", event_expectations);
				}
			}
		}
	}

	fn get_full_filepath(filepath: String, filename: String) -> String {
		let mut path = PathBuf::from(filepath);
		path.push(filename);
		path.to_str().unwrap().to_string()
	}
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
			let seed = [i as u8; 32];
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler {
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}

	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}

	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
	fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
		for i in 1..=depth {
			let prev_blockhash = node.best_block.block_hash();
			let height = node.best_block.height() + 1;
			let header = create_dummy_header(prev_blockhash, height);
			let txdata = vec![(0, tx)];
			node.best_block = BestBlock::new(header.block_hash(), height);
			match i {
				1 => {
					node.node.transactions_confirmed(&header, &txdata, height);
					node.chain_monitor.transactions_confirmed(&header, &txdata, height);
				},
				x if x == depth => {
					node.node.best_block_updated(&header, height);
					node.chain_monitor.best_block_updated(&header, height);
				},
				_ => {},
			}
		}
	}

	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}
	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}

		// Check that the initial channel manager data is persisted as expected.
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
		check_persisted_data!(nodes[0].node, filepath.clone());

		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Force-close the channel.
		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

		// Check that the force-close updates are persisted.
		check_persisted_data!(nodes[0].node, filepath.clone());
		loop {
			if !nodes[0].node.get_event_or_persist_condvar_value() { break }
		}

		// Check network graph is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
		check_persisted_data!(nodes[0].network_graph, filepath.clone());

		// Check scorer is persisted
		let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
		check_persisted_data!(nodes[0].scorer, filepath.clone());

		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}
1405 fn test_timer_tick_called() {
1407 // - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1408 // - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
1409 // - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
1410 // - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
1411 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1412 let data_dir = nodes[0].kv_store.get_data_dir();
1413 let persister = Arc::new(Persister::new(data_dir));
1414 let event_handler = |_: _| {};
1415 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		loop {
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
			let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
			let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
			let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
			if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
				log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
				log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
				log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
				break
			}
		}
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}

	#[test]
	fn test_channel_manager_persist_error() {
		// Test that if an error is encountered during manager persistence, the background thread
		// exits with that error, which is then surfaced by `join`.
		let (_, nodes) = create_nodes(2, "test_persist_error");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
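		// `join` blocks until the background thread exits, forwarding any error it hit.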
		match bg_processor.join() {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_channel_manager_persist_error_async() {
		// Test that if an error is encountered during manager persistence, the event loop exits
		// with that error.
		let (_, nodes) = create_nodes(2, "test_persist_error_sync");
		open_channel!(nodes[0], nodes[1], 100000);

		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
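		// `process_events_async` drives the same loop as `BackgroundProcessor`, but sleeping is
		// delegated to a caller-supplied future; resolving it to `false` means "keep running".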
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				Box::pin(async move {
					tokio::time::sleep(dur).await;
					false // Never exit
				})
			}, false,
		);
		match bp_future.await {
			Ok(_) => panic!("Expected error persisting manager"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}
	#[test]
	fn test_network_graph_persist_error() {
		// Test that if we encounter an error during network graph persistence, an error gets returned.
		let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		match bg_processor.stop() {
			Ok(_) => panic!("Expected error persisting network graph"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}

	#[test]
	fn test_scorer_persist_error() {
		// Test that if we encounter an error during scorer persistence, an error gets returned.
		let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		match bg_processor.stop() {
			Ok(_) => panic!("Expected error persisting scorer"),
			Err(e) => {
				assert_eq!(e.kind(), std::io::ErrorKind::Other);
				assert_eq!(e.get_ref().unwrap().to_string(), "test");
			},
		}
	}

	#[test]
	fn test_background_event_handling() {
		let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
		let channel_value = 100000;
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir.clone()));
		// Set up a background event handler for FundingGenerationReady events.
		let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
		let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
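		// The handler runs on the background processor's thread, so these bounded channels hand
		// each event back to the test thread where the assertions live.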
		let event_handler = move |event: Event| match event {
			Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
			Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
			Event::ChannelReady { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};

		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		// Open a channel and check that the FundingGenerationReady event was handled.
		begin_open_channel!(nodes[0], nodes[1], channel_value);
		let (temporary_channel_id, funding_tx) = funding_generation_recv
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("FundingGenerationReady not handled within deadline");
		nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
		get_event!(nodes[1], Event::ChannelPending);
		nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
		let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("ChannelPending not handled within deadline");
		// Confirm the funding transaction.
		confirm_transaction(&mut nodes[0], &funding_tx);
		let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		confirm_transaction(&mut nodes[1], &funding_tx);
		let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
		let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
		let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
		// Set up a background event handler for SpendableOutputs events.
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
			Event::ChannelReady { .. } => {},
			Event::ChannelClosed { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		};
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		// Force close the channel and check that the SpendableOutputs event was handled.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
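		// The broadcaster's to_self output only becomes spendable once the commitment transaction
		// matures past the to_self_delay, hence confirming to BREAKDOWN_TIMEOUT depth above.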
		let event = receiver
			.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
			.expect("Events not handled within deadline");
		match event {
			Event::SpendableOutputs { .. } => {},
			_ => panic!("Unexpected event: {:?}", event),
		}
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}

	#[test]
	fn test_scorer_persistence() {
		let (_, nodes) = create_nodes(2, "test_scorer_persistence");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		loop {
			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let expected_log = "Persisting scorer".to_string();
			if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
				break
			}
		}
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}
	}

	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
		($nodes: expr, $receive: expr, $sleep: expr) => {
			let features = ChannelFeatures::empty();
			$nodes[0].network_graph.add_channel_from_partial_announcement(
				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
			).expect("Failed to update channel from partial announcement");
			let original_graph_description = $nodes[0].network_graph.to_string();
			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
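			// The background processor defers `remove_stale_channels_and_tracking` while a rapid
			// gossip sync is pending, so give its loop a couple of timer ticks first.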
			loop {
				$sleep;
				let log_entries = $nodes[0].logger.lines.lock().unwrap();
				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
				// Wait until the loop has gone around at least twice.
				if *log_entries.get(&("lightning_background_processor", loop_counter)).unwrap_or(&0) > 1 { break }
			}
			let initialization_input = vec![
				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
			];
			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
			// This should have added two channels and pruned the previous one.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

			$receive.expect("Network graph not pruned within deadline");

			// All channels should now be pruned.
			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
		}
	}
	#[test]
	fn test_not_pruning_network_graph_until_graph_sync_completion() {
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);

		let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

		let event_handler = |_: _| {};
		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
			receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
			std::thread::sleep(Duration::from_millis(1)));

		background_processor.stop().unwrap();
	}

	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);

		let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
		let bp_future = super::process_events_async(
			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					tokio::select! {
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);
		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
				let mut i = 0;
				loop {
					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
					assert!(i < 5);
					i += 1;
				}
			}, tokio::time::sleep(Duration::from_millis(1)).await);
			exit_sender.send(()).unwrap();
		});

		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap();
	}
	macro_rules! do_test_payment_path_scoring {
		($nodes: expr, $receive: expr) => {
			// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
			// that we update the scorer upon a payment path succeeding (note that the channel must be
			// public or else we won't score it).
			// A background event handler for FundingGenerationReady events must be hooked up to a
			// running background processor.
			let scored_scid = 4242;
			let secp_ctx = Secp256k1::new();
			let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
			let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
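			// Build a synthetic one-hop path over `scored_scid`; each event pushed below should
			// trigger exactly one matching callback on the test scorer.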
			let path = Path { hops: vec![RouteHop {
				pubkey: node_1_id,
				node_features: NodeFeatures::empty(),
				short_channel_id: scored_scid,
				channel_features: ChannelFeatures::empty(),
				fee_msat: 0,
				cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
				maybe_announced_channel: true,
			}], blinded_tail: None };
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: false,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
			// Ensure we'll score payments that were explicitly failed back by the destination as
			// ProbeSuccess.
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
				payment_id: None,
				payment_hash: PaymentHash([42; 32]),
				payment_failed_permanently: true,
				failure: PathFailure::OnPath { network_update: None },
				path: path.clone(),
				short_channel_id: None,
			});
			let event = $receive.expect("PaymentPathFailed not handled within deadline");
			match event {
				Event::PaymentPathFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
			$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: None,
				path: path.clone(),
			});
			let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
			match event {
				Event::PaymentPathSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
			});
			let event = $receive.expect("ProbeSuccessful not handled within deadline");
			match event {
				Event::ProbeSuccessful { .. } => {},
				_ => panic!("Unexpected event"),
			}
			$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
			$nodes[0].node.push_pending_event(Event::ProbeFailed {
				payment_id: PaymentId([42; 32]),
				payment_hash: PaymentHash([42; 32]),
				path: path.clone(),
				short_channel_id: Some(scored_scid),
			});
			let event = $receive.expect("ProbeFailure not handled within deadline");
			match event {
				Event::ProbeFailed { .. } => {},
				_ => panic!("Unexpected event"),
			}
		}
	}

	#[test]
	fn test_payment_path_scoring() {
		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
		let event_handler = move |event: Event| match event {
			Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
			Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
			Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
			Event::ProbeFailed { .. } => sender.send(event).unwrap(),
			_ => panic!("Unexpected event: {:?}", event),
		};
		let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
		if !std::thread::panicking() {
			bg_processor.stop().unwrap();
		}

		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer after update".to_string();
		// One persist per scored event above: two path failures, one path success, one probe
		// success, and one probe failure.
		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
	}

	#[tokio::test]
	#[cfg(feature = "futures")]
	async fn test_payment_path_scoring_async() {
		let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
		let event_handler = move |event: Event| {
			let sender_ref = sender.clone();
			async move {
				match event {
					Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
					Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
					Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
					Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
					_ => panic!("Unexpected event: {:?}", event),
				}
			}
		};
		let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));

		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
		let bp_future = super::process_events_async(
			persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
			nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
			Some(nodes[0].scorer.clone()), move |dur: Duration| {
				let mut exit_receiver = exit_receiver.clone();
				Box::pin(async move {
					tokio::select! {
						_ = tokio::time::sleep(dur) => false,
						_ = exit_receiver.changed() => true,
					}
				})
			}, false,
		);
		let t1 = tokio::spawn(bp_future);
		let t2 = tokio::spawn(async move {
			do_test_payment_path_scoring!(nodes, receiver.recv().await);
			exit_sender.send(()).unwrap();

			let log_entries = nodes[0].logger.lines.lock().unwrap();
			let expected_log = "Persisting scorer after update".to_string();
			assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
		});

		let (r1, r2) = tokio::join!(t1, t2);
		r1.unwrap().unwrap();
		r2.unwrap();
	}
}