1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
5 // Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
6 #![deny(broken_intra_doc_links)]
7 #![deny(private_intra_doc_links)]
10 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
12 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
14 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
16 #[cfg(any(test, feature = "std"))]
19 #[cfg(not(feature = "std"))]
22 #[macro_use] extern crate lightning;
23 extern crate lightning_rapid_gossip_sync;
26 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
27 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
28 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
29 use lightning::events::{Event, PathFailure};
30 #[cfg(feature = "std")]
31 use lightning::events::{EventHandler, EventsProvider};
32 use lightning::ln::channelmanager::ChannelManager;
33 use lightning::ln::msgs::OnionMessageHandler;
34 use lightning::ln::peer_handler::APeerManager;
35 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
36 use lightning::routing::utxo::UtxoLookup;
37 use lightning::routing::router::Router;
38 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
39 use lightning::util::logger::Logger;
40 use lightning::util::persist::Persister;
41 #[cfg(feature = "std")]
42 use lightning::util::wakers::Sleeper;
43 use lightning_rapid_gossip_sync::RapidGossipSync;
46 use core::time::Duration;
48 #[cfg(feature = "std")]
50 #[cfg(feature = "std")]
51 use core::sync::atomic::{AtomicBool, Ordering};
52 #[cfg(feature = "std")]
53 use std::thread::{self, JoinHandle};
54 #[cfg(feature = "std")]
55 use std::time::Instant;
57 #[cfg(not(feature = "std"))]
60 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
61 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
62 /// responsibilities are:
63 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
64 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
65 /// writing it to disk/backups by invoking the callback given to it at startup.
66 /// [`ChannelManager`] persistence should be done in the background.
67 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
68 /// and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
69 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
70 /// [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
72 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
73 /// upon, as doing so may result in high latency.
77 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
78 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
79 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
80 /// unilateral chain closure fees are at risk.
82 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
83 /// [`Event`]: lightning::events::Event
84 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
85 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
86 #[cfg(feature = "std")]
87 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
88 pub struct BackgroundProcessor {
89 stop_thread: Arc<AtomicBool>,
90 thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
94 const FRESHNESS_TIMER: u64 = 60;
96 const FRESHNESS_TIMER: u64 = 1;
98 #[cfg(all(not(test), not(debug_assertions)))]
99 const PING_TIMER: u64 = 10;
100 /// Signature operations take a lot longer without compiler optimisations.
101 /// Increasing the ping timer allows for this, but slower devices will be disconnected if the
102 /// timeout is reached.
103 #[cfg(all(not(test), debug_assertions))]
104 const PING_TIMER: u64 = 30;
106 const PING_TIMER: u64 = 1;
109 const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
111 const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
113 /// Prune the network graph of stale entries hourly.
114 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
117 const SCORER_PERSIST_TIMER: u64 = 60 * 5;
119 const SCORER_PERSIST_TIMER: u64 = 1;
122 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
124 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
127 const REBROADCAST_TIMER: u64 = 30;
129 const REBROADCAST_TIMER: u64 = 1;
131 #[cfg(feature = "futures")]
132 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
133 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
134 #[cfg(feature = "futures")]
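// Presumably the shortest interval we need to wake for; used below as the non-mobile sleep
// duration in `process_events_async`.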
135 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
136 min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
138 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
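///
/// A minimal sketch of selecting a variant via the constructors below (the `Arc`-wrapped
/// `p2p_sync` and `rgs_sync` values are illustrative, not part of this crate):
///
/// ```ignore
/// // Sync gossip over the peer-to-peer network:
/// let gossip = GossipSync::p2p(Arc::clone(&p2p_sync));
/// // ...or from a rapid gossip sync server:
/// let gossip = GossipSync::rapid(Arc::clone(&rgs_sync));
/// ```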
140 P: Deref<Target = P2PGossipSync<G, U, L>>,
141 R: Deref<Target = RapidGossipSync<G, L>>,
142 G: Deref<Target = NetworkGraph<L>>,
146 where U::Target: UtxoLookup, L::Target: Logger {
147 /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
149 /// Rapid gossip sync from a trusted server.
156 P: Deref<Target = P2PGossipSync<G, U, L>>,
157 R: Deref<Target = RapidGossipSync<G, L>>,
158 G: Deref<Target = NetworkGraph<L>>,
161 > GossipSync<P, R, G, U, L>
162 where U::Target: UtxoLookup, L::Target: Logger {
163 fn network_graph(&self) -> Option<&G> {
165 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
166 GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
167 GossipSync::None => None,
171 fn prunable_network_graph(&self) -> Option<&G> {
173 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
174 GossipSync::Rapid(gossip_sync) => {
175 if gossip_sync.is_initial_sync_complete() {
176 Some(gossip_sync.network_graph())
181 GossipSync::None => None,
186 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
187 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
188 GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
190 U::Target: UtxoLookup,
193 /// Initializes a new [`GossipSync::P2P`] variant.
194 pub fn p2p(gossip_sync: P) -> Self {
195 GossipSync::P2P(gossip_sync)
199 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
200 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
202 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
205 &'a (dyn UtxoLookup + Send + Sync),
211 /// Initializes a new [`GossipSync::Rapid`] variant.
212 pub fn rapid(gossip_sync: R) -> Self {
213 GossipSync::Rapid(gossip_sync)
217 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
220 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
221 &RapidGossipSync<&'a NetworkGraph<L>, L>,
223 &'a (dyn UtxoLookup + Send + Sync),
229 /// Initializes a new [`GossipSync::None`] variant.
230 pub fn none() -> Self {
235 fn handle_network_graph_update<L: Deref>(
236 network_graph: &NetworkGraph<L>, event: &Event
237 ) where L::Target: Logger {
238 if let Event::PaymentPathFailed {
239 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
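// Apply the routing update (e.g. a channel failure observed along the failed path) to our graph.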
241 network_graph.handle_network_update(upd);
245 /// Updates scorer based on event and returns whether an update occurred so we can decide whether to persist.
247 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
248 scorer: &'a S, event: &Event, duration_since_epoch: Duration,
251 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
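// A hop along the path failed; penalize the failing channel in the scorer.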
252 let mut score = scorer.write_lock();
253 score.payment_path_failed(path, *scid, duration_since_epoch);
255 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
256 // Reached if the destination explicitly failed it back. We treat this as a successful probe
257 // because the payment made it all the way to the destination with sufficient liquidity.
258 let mut score = scorer.write_lock();
259 score.probe_successful(path, duration_since_epoch);
261 Event::PaymentPathSuccessful { path, .. } => {
262 let mut score = scorer.write_lock();
263 score.payment_path_successful(path, duration_since_epoch);
265 Event::ProbeSuccessful { path, .. } => {
266 let mut score = scorer.write_lock();
267 score.probe_successful(path, duration_since_epoch);
269 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
270 let mut score = scorer.write_lock();
271 score.probe_failed(path, *scid, duration_since_epoch);
278 macro_rules! define_run_body {
280 $persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
281 $channel_manager: ident, $process_channel_manager_events: expr,
282 $peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
283 $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
284 $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
286 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
287 $channel_manager.timer_tick_occurred();
288 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
289 $chain_monitor.rebroadcast_pending_claims();
291 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
292 let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
293 let mut last_ping_call = $get_timer(PING_TIMER);
294 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
295 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
296 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
297 let mut have_pruned = false;
298 let mut have_decayed_scorer = false;
301 $process_channel_manager_events;
302 $process_chain_monitor_events;
303 $process_onion_message_handler_events;
305 // Note that the PeerManager::process_events may block on ChannelManager's locks,
306 // hence it comes last here. When the ChannelManager finishes whatever it's doing,
307 // we want to ensure we get into `persist_manager` as quickly as we can, especially
308 // without running the normal event processing above and handing events to users.
310 // Specifically, on an *extremely* slow machine, we may see ChannelManager start
311 // processing a message effectively at any point during this loop. In order to
312 // minimize the time between such processing completing and persisting the updated
313 // ChannelManager, we want to minimize methods blocking on a ChannelManager
314 // generally, and as a fallback place such blocking only immediately before persisting.
316 $peer_manager.as_ref().process_events();
318 // Exit the loop if the background processor was requested to stop.
319 if $loop_exit_check {
320 log_trace!($logger, "Terminating background processor.");
324 // We wait up to 100ms, but track how long it takes to detect being put to sleep,
325 // see `await_start`'s use below.
326 let mut await_start = None;
327 if $check_slow_await { await_start = Some($get_timer(1)); }
329 let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
331 // Exit the loop if the background processor was requested to stop.
332 if $loop_exit_check {
333 log_trace!($logger, "Terminating background processor.");
337 if $channel_manager.get_and_clear_needs_persistence() {
338 log_trace!($logger, "Persisting ChannelManager...");
339 $persister.persist_manager(&*$channel_manager)?;
340 log_trace!($logger, "Done persisting ChannelManager.");
342 if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
343 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
344 $channel_manager.timer_tick_occurred();
345 last_freshness_call = $get_timer(FRESHNESS_TIMER);
347 if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
348 log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
349 $peer_manager.onion_message_handler().timer_tick_occurred();
350 last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
353 // On various platforms, we may be starved of CPU cycles for several reasons.
354 // E.g. on iOS, if we've been in the background, we will be entirely paused.
355 // Similarly, if we're on a desktop platform and the device has been asleep, we
356 // may not get any cycles.
357 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
358 // full second, at which point we assume sockets may have been killed (they
359 // appear to be at least on some platforms, even if it has only been a second).
360 // Note that we have to take care to not get here just because user event
361 // processing was slow at the top of the loop. For example, the sample client
362 // may call Bitcoin Core RPCs during event handling, which very often takes
363 // more than a handful of seconds to complete, and shouldn't disconnect all our peers.
365 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
366 $peer_manager.as_ref().disconnect_all_peers();
367 last_ping_call = $get_timer(PING_TIMER);
368 } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
369 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
370 $peer_manager.as_ref().timer_tick_occurred();
371 last_ping_call = $get_timer(PING_TIMER);
374 // Note that we want to run a graph prune once not long after startup before
375 // falling back to our usual hourly prunes. This avoids short-lived clients never
376 // pruning their network graph. We run once 60 seconds after startup before
377 // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
378 // we prune after an initial sync completes.
379 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
380 let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
381 let should_prune = match $gossip_sync {
382 GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
383 _ => prune_timer_elapsed,
386 // The network graph must not be pruned while rapid sync completion is pending
387 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
388 if let Some(duration_since_epoch) = $time_fetch() {
389 log_trace!($logger, "Pruning and persisting network graph.");
390 network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
392 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
393 log_trace!($logger, "Persisting network graph.");
396 if let Err(e) = $persister.persist_graph(network_graph) {
397 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
402 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
403 last_prune_call = $get_timer(prune_timer);
406 if !have_decayed_scorer {
407 if let Some(ref scorer) = $scorer {
408 if let Some(duration_since_epoch) = $time_fetch() {
409 scorer.write_lock().decay_liquidity_certainty(duration_since_epoch);
412 have_decayed_scorer = true;
415 if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
416 if let Some(ref scorer) = $scorer {
417 if let Some(duration_since_epoch) = $time_fetch() {
418 log_trace!($logger, "Decaying and persisting scorer");
419 scorer.write_lock().decay_liquidity_certainty(duration_since_epoch);
421 log_trace!($logger, "Persisting scorer");
423 if let Err(e) = $persister.persist_scorer(&scorer) {
424 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
427 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
430 if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
431 log_trace!($logger, "Rebroadcasting monitor's pending claims");
432 $chain_monitor.rebroadcast_pending_claims();
433 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
437 // After we exit, ensure we persist the ChannelManager one final time - this avoids
438 // some races where users quit while channel updates were in-flight, with
439 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
440 $persister.persist_manager(&*$channel_manager)?;
442 // Persist Scorer on exit
443 if let Some(ref scorer) = $scorer {
444 $persister.persist_scorer(&scorer)?;
447 // Persist NetworkGraph on exit
448 if let Some(network_graph) = $gossip_sync.network_graph() {
449 $persister.persist_graph(network_graph)?;
456 #[cfg(feature = "futures")]
457 pub(crate) mod futures_util {
458 use core::future::Future;
459 use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
461 use core::marker::Unpin;
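/// A small hand-rolled "select" future: polls the three inner futures in order and resolves as
/// soon as any of them completes (used below to wake on a ChannelManager event/persistence
/// notification, a ChainMonitor update, or the user-provided sleeper).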
462 pub(crate) struct Selector<
463 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
469 pub(crate) enum SelectorOutput {
474 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
475 > Future for Selector<A, B, C> {
476 type Output = SelectorOutput;
477 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
478 match Pin::new(&mut self.a).poll(ctx) {
479 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
482 match Pin::new(&mut self.b).poll(ctx) {
483 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
486 match Pin::new(&mut self.c).poll(ctx) {
487 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
494 // If we want to poll a future without an async context to figure out if it has completed or
495 // not without awaiting, we need a Waker, which needs a vtable. We fill it with dummy values,
496 // but sadly there's a good bit of boilerplate here.
497 fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
498 fn dummy_waker_action(_: *const ()) { }
500 const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
501 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
502 pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
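// Usage sketch (see `process_events_async` below): build a Context from `dummy_waker()` and poll
// the sleep future once to check whether it has already completed, without awaiting it.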
504 #[cfg(feature = "futures")]
505 use futures_util::{Selector, SelectorOutput, dummy_waker};
506 #[cfg(feature = "futures")]
509 /// Processes background events in a future.
511 /// `sleeper` should return a future which completes in the given amount of time and returns a
512 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
513 /// future which outputs `true`, the loop will exit and this function's future will complete.
514 /// The `sleeper` future is free to return early after it has triggered the exit condition.
516 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
518 /// Requires the `futures` feature. Note that while this method is available without the `std`
519 /// feature, doing so will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
520 /// you should instead call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
521 /// manually on a regular basis.
523 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
524 /// mobile device, where we may need to check for interruption of the application regularly. If you
525 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
526 /// are hundreds or thousands of simultaneous process calls running.
528 /// The `fetch_time` parameter should return the current wall clock time, if one is available. If
529 /// no time is available, some features may be disabled, however the node will still operate fine.
531 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
532 /// could setup `process_events_async` like this:
534 /// # use lightning::io;
535 /// # use std::sync::{Arc, RwLock};
536 /// # use std::sync::atomic::{AtomicBool, Ordering};
537 /// # use std::time::SystemTime;
538 /// # use lightning_background_processor::{process_events_async, GossipSync};
539 /// # struct MyStore {}
540 /// # impl lightning::util::persist::KVStore for MyStore {
541 /// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
542 /// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
543 /// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
544 /// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
546 /// # struct MyEventHandler {}
547 /// # impl MyEventHandler {
548 /// # async fn handle_event(&self, _: lightning::events::Event) {}
550 /// # #[derive(Eq, PartialEq, Clone, Hash)]
551 /// # struct MySocketDescriptor {}
552 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
553 /// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
554 /// # fn disconnect_socket(&mut self) {}
556 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
557 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
558 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
559 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
560 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
561 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
562 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
563 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
564 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
565 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
566 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
567 /// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
569 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
570 /// let background_persister = Arc::clone(&my_persister);
571 /// let background_event_handler = Arc::clone(&my_event_handler);
572 /// let background_chain_mon = Arc::clone(&my_chain_monitor);
573 /// let background_chan_man = Arc::clone(&my_channel_manager);
574 /// let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
575 /// let background_peer_man = Arc::clone(&my_peer_manager);
576 /// let background_logger = Arc::clone(&my_logger);
577 /// let background_scorer = Arc::clone(&my_scorer);
579 /// // Setup the sleeper.
580 /// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
582 /// let sleeper = move |d| {
583 /// let mut receiver = stop_receiver.clone();
584 /// Box::pin(async move {
586 /// _ = tokio::time::sleep(d) => false,
587 /// _ = receiver.changed() => true,
592 /// let mobile_interruptable_platform = false;
594 /// let handle = tokio::spawn(async move {
595 /// process_events_async(
596 /// background_persister,
597 /// |e| background_event_handler.handle_event(e),
598 /// background_chain_mon,
599 /// background_chan_man,
600 /// background_gossip_sync,
601 /// background_peer_man,
602 /// background_logger,
603 /// Some(background_scorer),
605 /// mobile_interruptable_platform,
606 /// || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
609 /// .expect("Failed to process events");
612 /// // Stop the background processing.
613 /// stop_sender.send(()).unwrap();
614 /// handle.await.unwrap();
617 #[cfg(feature = "futures")]
618 pub async fn process_events_async<
620 UL: 'static + Deref + Send + Sync,
621 CF: 'static + Deref + Send + Sync,
622 CW: 'static + Deref + Send + Sync,
623 T: 'static + Deref + Send + Sync,
624 ES: 'static + Deref + Send + Sync,
625 NS: 'static + Deref + Send + Sync,
626 SP: 'static + Deref + Send + Sync,
627 F: 'static + Deref + Send + Sync,
628 R: 'static + Deref + Send + Sync,
629 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
630 L: 'static + Deref + Send + Sync,
631 P: 'static + Deref + Send + Sync,
632 EventHandlerFuture: core::future::Future<Output = ()>,
633 EventHandler: Fn(Event) -> EventHandlerFuture,
634 PS: 'static + Deref + Send,
635 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
636 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
637 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
638 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
639 PM: 'static + Deref + Send + Sync,
640 S: 'static + Deref<Target = SC> + Send + Sync,
641 SC: for<'b> WriteableScore<'b>,
642 SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
643 Sleeper: Fn(Duration) -> SleepFuture,
644 FetchTime: Fn() -> Option<Duration>,
646 persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
647 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
648 sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
649 ) -> Result<(), lightning::io::Error>
651 UL::Target: 'static + UtxoLookup,
652 CF::Target: 'static + chain::Filter,
653 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
654 T::Target: 'static + BroadcasterInterface,
655 ES::Target: 'static + EntropySource,
656 NS::Target: 'static + NodeSigner,
657 SP::Target: 'static + SignerProvider,
658 F::Target: 'static + FeeEstimator,
659 R::Target: 'static + Router,
660 L::Target: 'static + Logger,
661 P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
662 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
663 PM::Target: APeerManager + Send + Sync,
665 let mut should_break = false;
666 let async_event_handler = |event| {
667 let network_graph = gossip_sync.network_graph();
668 let event_handler = &event_handler;
669 let scorer = &scorer;
670 let logger = &logger;
671 let persister = &persister;
672 let fetch_time = &fetch_time;
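// Before handing the event to the user's handler, apply any network-graph update it carries and
// let the scorer learn from it, persisting the scorer if it changed.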
674 if let Some(network_graph) = network_graph {
675 handle_network_graph_update(network_graph, &event)
677 if let Some(ref scorer) = scorer {
678 if let Some(duration_since_epoch) = fetch_time() {
679 if update_scorer(scorer, &event, duration_since_epoch) {
680 log_trace!(logger, "Persisting scorer after update");
681 if let Err(e) = persister.persist_scorer(&scorer) {
682 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
687 event_handler(event).await;
691 persister, chain_monitor,
692 chain_monitor.process_pending_events_async(async_event_handler).await,
693 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
694 peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
695 gossip_sync, logger, scorer, should_break, {
697 a: channel_manager.get_event_or_persistence_needed_future(),
698 b: chain_monitor.get_update_future(),
699 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
702 SelectorOutput::A|SelectorOutput::B => {},
703 SelectorOutput::C(exit) => {
707 }, |t| sleeper(Duration::from_secs(t)),
708 |fut: &mut SleepFuture, _| {
709 let mut waker = dummy_waker();
710 let mut ctx = task::Context::from_waker(&mut waker);
711 match core::pin::Pin::new(fut).poll(&mut ctx) {
712 task::Poll::Ready(exit) => { should_break = exit; true },
713 task::Poll::Pending => false,
715 }, mobile_interruptable_platform, fetch_time,
719 #[cfg(feature = "futures")]
720 async fn process_onion_message_handler_events_async<
721 EventHandlerFuture: core::future::Future<Output = ()>,
722 EventHandler: Fn(Event) -> EventHandlerFuture,
723 PM: 'static + Deref + Send + Sync,
725 peer_manager: &PM, handler: EventHandler
728 PM::Target: APeerManager + Send + Sync,
730 use lightning::events::EventsProvider;
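// Collect the pending events synchronously first (the closure passed to `process_pending_events`
// cannot be async), then await the user's handler on each one.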
732 let events = core::cell::RefCell::new(Vec::new());
733 peer_manager.onion_message_handler().process_pending_events(&|e| events.borrow_mut().push(e));
735 for event in events.into_inner() {
740 #[cfg(feature = "std")]
741 impl BackgroundProcessor {
742 /// Start a background thread that takes care of responsibilities enumerated in the [top-level documentation].
745 /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
746 /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
747 /// either [`join`] or [`stop`].
749 /// # Data Persistence
751 /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
752 /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
753 /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
754 /// provided implementation.
756 /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
757 /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
758 /// See the `lightning-persister` crate for LDK's provided implementation.
760 /// Typically, users should either implement [`Persister::persist_manager`] to never return an
761 /// error or call [`join`] and handle any error that may arise. For the latter case,
762 /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
766 /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
767 /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
768 /// functionality implemented by other handlers.
769 /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
771 /// # Rapid Gossip Sync
773 /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
774 /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
775 /// until the [`RapidGossipSync`] instance completes its first sync.
777 /// [top-level documentation]: BackgroundProcessor
778 /// [`join`]: Self::join
779 /// [`stop`]: Self::stop
780 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
781 /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
782 /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
783 /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
784 /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
785 /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
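///
/// # Example
///
/// A minimal sketch (the `Arc`-wrapped values are assumed to exist in your application and are
/// illustrative, not part of this crate):
///
/// ```ignore
/// let bg_processor = BackgroundProcessor::start(
/// 	persister, event_handler, chain_monitor, channel_manager,
/// 	GossipSync::p2p(gossip_sync), peer_manager, logger, Some(scorer),
/// );
/// // ... run the node ...
/// // On shutdown, stop the thread and surface any persistence error.
/// bg_processor.stop().expect("Failed to persist on shutdown");
/// ```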
788 UL: 'static + Deref + Send + Sync,
789 CF: 'static + Deref + Send + Sync,
790 CW: 'static + Deref + Send + Sync,
791 T: 'static + Deref + Send + Sync,
792 ES: 'static + Deref + Send + Sync,
793 NS: 'static + Deref + Send + Sync,
794 SP: 'static + Deref + Send + Sync,
795 F: 'static + Deref + Send + Sync,
796 R: 'static + Deref + Send + Sync,
797 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
798 L: 'static + Deref + Send + Sync,
799 P: 'static + Deref + Send + Sync,
800 EH: 'static + EventHandler + Send,
801 PS: 'static + Deref + Send,
802 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
803 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
804 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
805 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
806 PM: 'static + Deref + Send + Sync,
807 S: 'static + Deref<Target = SC> + Send + Sync,
808 SC: for <'b> WriteableScore<'b>,
810 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
811 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
814 UL::Target: 'static + UtxoLookup,
815 CF::Target: 'static + chain::Filter,
816 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
817 T::Target: 'static + BroadcasterInterface,
818 ES::Target: 'static + EntropySource,
819 NS::Target: 'static + NodeSigner,
820 SP::Target: 'static + SignerProvider,
821 F::Target: 'static + FeeEstimator,
822 R::Target: 'static + Router,
823 L::Target: 'static + Logger,
824 P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
825 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
826 PM::Target: APeerManager + Send + Sync,
828 let stop_thread = Arc::new(AtomicBool::new(false));
829 let stop_thread_clone = stop_thread.clone();
830 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
831 let event_handler = |event| {
832 let network_graph = gossip_sync.network_graph();
833 if let Some(network_graph) = network_graph {
834 handle_network_graph_update(network_graph, &event)
836 if let Some(ref scorer) = scorer {
837 use std::time::SystemTime;
838 let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
839 .expect("Time should be sometime after 1970");
840 if update_scorer(scorer, &event, duration_since_epoch) {
841 log_trace!(logger, "Persisting scorer after update");
842 if let Err(e) = persister.persist_scorer(&scorer) {
843 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
847 event_handler.handle_event(event);
850 persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
851 channel_manager, channel_manager.process_pending_events(&event_handler),
853 peer_manager.onion_message_handler().process_pending_events(&event_handler),
854 gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
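// Sleep until the ChannelManager needs persistence or has events, the ChainMonitor has an
// update, or 100ms passes, whichever comes first.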
855 { Sleeper::from_two_futures(
856 channel_manager.get_event_or_persistence_needed_future(),
857 chain_monitor.get_update_future()
858 ).wait_timeout(Duration::from_millis(100)); },
859 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
861 use std::time::SystemTime;
862 Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
863 .expect("Time should be sometime after 1970"))
867 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
870 /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
871 /// [`ChannelManager`].
875 /// This function panics if the background thread has panicked, such as while persisting or handling events.
878 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
879 pub fn join(mut self) -> Result<(), std::io::Error> {
880 assert!(self.thread_handle.is_some());
884 /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
885 /// [`ChannelManager`].
889 /// This function panics if the background thread has panicked, such as while persisting or handling events.
892 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
893 pub fn stop(mut self) -> Result<(), std::io::Error> {
894 assert!(self.thread_handle.is_some());
895 self.stop_and_join_thread()
898 fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
899 self.stop_thread.store(true, Ordering::Release);
903 fn join_thread(&mut self) -> Result<(), std::io::Error> {
904 match self.thread_handle.take() {
905 Some(handle) => handle.join().unwrap(),
911 #[cfg(feature = "std")]
912 impl Drop for BackgroundProcessor {
914 self.stop_and_join_thread().unwrap();
918 #[cfg(all(feature = "std", test))]
920 use bitcoin::blockdata::constants::{genesis_block, ChainHash};
921 use bitcoin::blockdata::locktime::absolute::LockTime;
922 use bitcoin::blockdata::transaction::{Transaction, TxOut};
923 use bitcoin::network::constants::Network;
924 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
925 use lightning::chain::{BestBlock, Confirm, chainmonitor};
926 use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
927 use lightning::sign::{InMemorySigner, KeysManager};
928 use lightning::chain::transaction::OutPoint;
929 use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
930 use lightning::{get_event_msg, get_event};
931 use lightning::ln::PaymentHash;
932 use lightning::ln::channelmanager;
933 use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
934 use lightning::ln::features::{ChannelFeatures, NodeFeatures};
935 use lightning::ln::functional_test_utils::*;
936 use lightning::ln::msgs::{ChannelMessageHandler, Init};
937 use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
938 use lightning::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync};
939 use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
940 use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
941 use lightning::util::config::UserConfig;
942 use lightning::util::ser::Writeable;
943 use lightning::util::test_utils;
944 use lightning::util::persist::{KVStore,
945 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
946 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
947 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
948 use lightning_persister::fs_store::FilesystemStore;
949 use std::collections::VecDeque;
951 use std::path::PathBuf;
952 use std::sync::{Arc, Mutex};
953 use std::sync::mpsc::SyncSender;
954 use std::time::Duration;
955 use lightning_rapid_gossip_sync::RapidGossipSync;
956 use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
958 const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
960 #[derive(Clone, Hash, PartialEq, Eq)]
961 struct TestDescriptor{}
962 impl SocketDescriptor for TestDescriptor {
963 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
967 fn disconnect_socket(&mut self) {}
971 type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
972 #[cfg(not(c_bindings))]
973 type LockingWrapper<T> = Mutex<T>;
975 type ChannelManager =
976 channelmanager::ChannelManager<
978 Arc<test_utils::TestBroadcaster>,
982 Arc<test_utils::TestFeeEstimator>,
984 Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
985 Arc<test_utils::TestLogger>,
986 Arc<LockingWrapper<TestScorer>>,
990 Arc<test_utils::TestLogger>>;
992 type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
994 type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
995 type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
998 node: Arc<ChannelManager>,
999 p2p_gossip_sync: PGS,
1000 rapid_gossip_sync: RGS,
1001 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
1002 chain_monitor: Arc<ChainMonitor>,
1003 kv_store: Arc<FilesystemStore>,
1004 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
1005 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
1006 logger: Arc<test_utils::TestLogger>,
1007 best_block: BestBlock,
1008 scorer: Arc<LockingWrapper<TestScorer>>,
1012 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1013 GossipSync::P2P(self.p2p_gossip_sync.clone())
1016 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1017 GossipSync::Rapid(self.rapid_gossip_sync.clone())
1020 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1025 impl Drop for Node {
1026 fn drop(&mut self) {
1027 let data_dir = self.kv_store.get_data_dir();
1028 match fs::remove_dir_all(data_dir.clone()) {
1029 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
1036 graph_error: Option<(std::io::ErrorKind, &'static str)>,
1037 graph_persistence_notifier: Option<SyncSender<()>>,
1038 manager_error: Option<(std::io::ErrorKind, &'static str)>,
1039 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
1040 kv_store: FilesystemStore,
1044 fn new(data_dir: PathBuf) -> Self {
1045 let kv_store = FilesystemStore::new(data_dir);
1046 Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
1049 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1050 Self { graph_error: Some((error, message)), ..self }
1053 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
1054 Self { graph_persistence_notifier: Some(sender), ..self }
1057 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1058 Self { manager_error: Some((error, message)), ..self }
1061 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1062 Self { scorer_error: Some((error, message)), ..self }
1066 impl KVStore for Persister {
1067 fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
1068 self.kv_store.read(primary_namespace, secondary_namespace, key)
1071 fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
1072 if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
1073 secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
1074 key == CHANNEL_MANAGER_PERSISTENCE_KEY
1076 if let Some((error, message)) = self.manager_error {
1077 return Err(std::io::Error::new(error, message))
1081 if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
1082 secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
1083 key == NETWORK_GRAPH_PERSISTENCE_KEY
1085 if let Some(sender) = &self.graph_persistence_notifier {
1086 match sender.send(()) {
1088 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1092 if let Some((error, message)) = self.graph_error {
1093 return Err(std::io::Error::new(error, message))
1097 if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
1098 secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
1099 key == SCORER_PERSISTENCE_KEY
1101 if let Some((error, message)) = self.scorer_error {
1102 return Err(std::io::Error::new(error, message))
1106 self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
1109 fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1110 self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
1113 fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
1114 self.kv_store.list(primary_namespace, secondary_namespace)
1119 event_expectations: Option<VecDeque<TestResult>>,
1124 PaymentFailure { path: Path, short_channel_id: u64 },
1125 PaymentSuccess { path: Path },
1126 ProbeFailure { path: Path },
1127 ProbeSuccess { path: Path },
1132 Self { event_expectations: None }
1135 fn expect(&mut self, expectation: TestResult) {
1136 self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1140 impl lightning::util::ser::Writeable for TestScorer {
1141 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1144 impl ScoreLookUp for TestScorer {
1145 type ScoreParams = ();
1146 fn channel_penalty_msat(
1147 &self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
1148 ) -> u64 { unimplemented!(); }
1151 impl ScoreUpdate for TestScorer {
1152 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
1153 if let Some(expectations) = &mut self.event_expectations {
1154 match expectations.pop_front().unwrap() {
1155 TestResult::PaymentFailure { path, short_channel_id } => {
1156 assert_eq!(actual_path, &path);
1157 assert_eq!(actual_short_channel_id, short_channel_id);
1159 TestResult::PaymentSuccess { path } => {
1160 panic!("Unexpected successful payment path: {:?}", path)
1162 TestResult::ProbeFailure { path } => {
1163 panic!("Unexpected probe failure: {:?}", path)
1165 TestResult::ProbeSuccess { path } => {
1166 panic!("Unexpected probe success: {:?}", path)
1172 fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
1173 if let Some(expectations) = &mut self.event_expectations {
1174 match expectations.pop_front().unwrap() {
1175 TestResult::PaymentFailure { path, .. } => {
1176 panic!("Unexpected payment path failure: {:?}", path)
1178 TestResult::PaymentSuccess { path } => {
1179 assert_eq!(actual_path, &path);
1181 TestResult::ProbeFailure { path } => {
1182 panic!("Unexpected probe failure: {:?}", path)
1184 TestResult::ProbeSuccess { path } => {
1185 panic!("Unexpected probe success: {:?}", path)
1191 fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
1192 if let Some(expectations) = &mut self.event_expectations {
1193 match expectations.pop_front().unwrap() {
1194 TestResult::PaymentFailure { path, .. } => {
1195 panic!("Unexpected payment path failure: {:?}", path)
1197 TestResult::PaymentSuccess { path } => {
1198 panic!("Unexpected payment path success: {:?}", path)
1200 TestResult::ProbeFailure { path } => {
1201 assert_eq!(actual_path, &path);
1203 TestResult::ProbeSuccess { path } => {
1204 panic!("Unexpected probe success: {:?}", path)
1209 fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
1210 if let Some(expectations) = &mut self.event_expectations {
1211 match expectations.pop_front().unwrap() {
1212 TestResult::PaymentFailure { path, .. } => {
1213 panic!("Unexpected payment path failure: {:?}", path)
1215 TestResult::PaymentSuccess { path } => {
1216 panic!("Unexpected payment path success: {:?}", path)
1218 TestResult::ProbeFailure { path } => {
1219 panic!("Unexpected probe failure: {:?}", path)
1221 TestResult::ProbeSuccess { path } => {
1222 assert_eq!(actual_path, &path);
1227 fn decay_liquidity_certainty(&mut self, _: Duration) {}
1231 impl lightning::routing::scoring::Score for TestScorer {}
1233 impl Drop for TestScorer {
1234 fn drop(&mut self) {
1235 if std::thread::panicking() {
1239 if let Some(event_expectations) = &self.event_expectations {
1240 if !event_expectations.is_empty() {
1241 panic!("Unsatisfied event expectations: {:?}", event_expectations);
1247 fn get_full_filepath(filepath: String, filename: String) -> String {
1248 let mut path = PathBuf::from(filepath);
1249 path.push(filename);
1250 path.to_str().unwrap().to_string()
1253 fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
1254 let persist_temp_path = env::temp_dir().join(persist_dir);
1255 let persist_dir = persist_temp_path.to_string_lossy().to_string();
1256 let network = Network::Bitcoin;
1257 let mut nodes = Vec::new();
1258 for i in 0..num_nodes {
1259 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1260 let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1261 let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1262 let genesis_block = genesis_block(network);
1263 let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1264 let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
1265 let seed = [i as u8; 32];
1266 let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), Default::default()));
1267 let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
1268 let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
1269 let now = Duration::from_secs(genesis_block.header.time as u64);
1270 let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1271 let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
1272 let best_block = BestBlock::from_network(network);
1273 let params = ChainParameters { network, best_block };
1274 let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
1275 let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1276 let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1277 let msg_handler = MessageHandler {
1278 chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
1279 route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
1280 onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
1282 let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
1283 let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
1287 for i in 0..num_nodes {
1288 for j in (i+1)..num_nodes {
1289 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
1290 features: nodes[j].node.init_features(), networks: None, remote_network_address: None
1292 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
1293 features: nodes[i].node.init_features(), networks: None, remote_network_address: None
1298 (persist_dir, nodes)
1301 macro_rules! open_channel {
1302 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1303 begin_open_channel!($node_a, $node_b, $channel_value);
1304 let events = $node_a.node.get_and_clear_pending_events();
1305 assert_eq!(events.len(), 1);
1306 let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1307 $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1308 $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1309 get_event!($node_b, Event::ChannelPending);
1310 $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1311 get_event!($node_a, Event::ChannelPending);
1316 macro_rules! begin_open_channel {
1317 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1318 $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
1319 $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1320 $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1324 macro_rules! handle_funding_generation_ready {
1325 ($event: expr, $channel_value: expr) => {{
1327 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1328 assert_eq!(channel_value_satoshis, $channel_value);
1329 assert_eq!(user_channel_id, 42);
1331 let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
1332 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1334 (temporary_channel_id, tx)
1336 _ => panic!("Unexpected event"),
1341 fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1342 for i in 1..=depth {
1343 let prev_blockhash = node.best_block.block_hash();
1344 let height = node.best_block.height() + 1;
1345 let header = create_dummy_header(prev_blockhash, height);
1346 let txdata = vec![(0, tx)];
1347 node.best_block = BestBlock::new(header.block_hash(), height);
1350 node.node.transactions_confirmed(&header, &txdata, height);
1351 node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1353 x if x == depth => {
1354 node.node.best_block_updated(&header, height);
1355 node.chain_monitor.best_block_updated(&header, height);
1361 fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1362 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1366 fn test_background_processor() {
1367 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
1368 // updates. Also test that when new updates are available, the manager signals that it needs
1369 // re-persistence and is successfully re-persisted.
1370 let (persist_dir, nodes) = create_nodes(2, "test_background_processor");
1372 // Go through the channel creation process so that each node has something to persist. Since
1373 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1374 // avoid a race with processing events.
1375 let tx = open_channel!(nodes[0], nodes[1], 100000);
1377 // Initiate the background processors to watch each node.
1378 let data_dir = nodes[0].kv_store.get_data_dir();
1379 let persister = Arc::new(Persister::new(data_dir));
1380 let event_handler = |_: _| {};
1381 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
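// Poll the store until the bytes at `$filepath` match `$node`'s current serialization.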
1383 macro_rules! check_persisted_data {
1384 ($node: expr, $filepath: expr) => {
1385 let mut expected_bytes = Vec::new();
1387 expected_bytes.clear();
1388 match $node.write(&mut expected_bytes) {
1390 match std::fs::read($filepath) {
1392 if bytes == expected_bytes {
1401 Err(e) => panic!("Unexpected error: {}", e)
1407 // Check that the initial channel manager data is persisted as expected.
1408 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
1409 check_persisted_data!(nodes[0].node, filepath.clone());
1412 if !nodes[0].node.get_event_or_persist_condvar_value() { break }
1415 // Force-close the channel.
1416 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1418 // Check that the force-close updates are persisted.
1419 check_persisted_data!(nodes[0].node, filepath.clone());
1421 if !nodes[0].node.get_event_or_persist_condvar_value() { break }
1424 // Check network graph is persisted
1425 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
1426 check_persisted_data!(nodes[0].network_graph, filepath.clone());
1428 // Check scorer is persisted
1429 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
1430 check_persisted_data!(nodes[0].scorer, filepath.clone());
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_timer_tick_called() {
	// Test that:
1440 // - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
1441 // - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
1442 // - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
1443 // - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
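	// Each of these calls emits a log line via the test logger; the loop below polls the captured
	// lines until all four have been seen at least once.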
1444 let (_, nodes) = create_nodes(1, "test_timer_tick_called");
1445 let data_dir = nodes[0].kv_store.get_data_dir();
1446 let persister = Arc::new(Persister::new(data_dir));
1447 let event_handler = |_: _| {};
1448 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
		let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
		let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
		let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
		if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
			break
		}
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_channel_manager_persist_error() {
1470 // Test that if we encounter an error during manager persistence, the thread panics.
1471 let (_, nodes) = create_nodes(2, "test_persist_error");
1472 open_channel!(nodes[0], nodes[1], 100000);
1474 let data_dir = nodes[0].kv_store.get_data_dir();
1475 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1476 let event_handler = |_: _| {};
1477 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.join() {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_channel_manager_persist_error_async() {
1490 // Test that if we encounter an error during manager persistence, the thread panics.
1491 let (_, nodes) = create_nodes(2, "test_persist_error_sync");
1492 open_channel!(nodes[0], nodes[1], 100000);
1494 let data_dir = nodes[0].kv_store.get_data_dir();
1495 let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
1497 let bp_future = super::process_events_async(
1498 persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1499 nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1500 Some(nodes[0].scorer.clone()), move |dur: Duration| {
			Box::pin(async move {
				tokio::time::sleep(dur).await;
				false // Never exit
			})
		}, false, || Some(Duration::ZERO),
	);
	match bp_future.await {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_network_graph_persist_error() {
1518 // Test that if we encounter an error during network graph persistence, an error gets returned.
1519 let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
1520 let data_dir = nodes[0].kv_store.get_data_dir();
1521 let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
1522 let event_handler = |_: _| {};
1523 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting network graph"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_scorer_persist_error() {
1536 // Test that if we encounter an error during scorer persistence, an error gets returned.
1537 let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
1538 let data_dir = nodes[0].kv_store.get_data_dir();
1539 let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
1540 let event_handler = |_: _| {};
1541 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting scorer"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_background_event_handling() {
1554 let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
1555 let channel_value = 100000;
1556 let data_dir = nodes[0].kv_store.get_data_dir();
1557 let persister = Arc::new(Persister::new(data_dir.clone()));
1559 // Set up a background event handler for FundingGenerationReady events.
1560 let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
1561 let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
1562 let event_handler = move |event: Event| match event {
1563 Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
1564 Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
1565 Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
1569 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1571 // Open a channel and check that the FundingGenerationReady event was handled.
1572 begin_open_channel!(nodes[0], nodes[1], channel_value);
1573 let (temporary_channel_id, funding_tx) = funding_generation_recv
1574 .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1575 .expect("FundingGenerationReady not handled within deadline");
1576 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1577 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
1578 get_event!(nodes[1], Event::ChannelPending);
1579 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1580 let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
1581 .expect("ChannelPending not handled within deadline");
1583 // Confirm the funding transaction.
1584 confirm_transaction(&mut nodes[0], &funding_tx);
1585 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
1586 confirm_transaction(&mut nodes[1], &funding_tx);
1587 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
1588 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
1589 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1590 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
1591 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
1597 // Set up a background event handler for SpendableOutputs events.
1598 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1599 let event_handler = move |event: Event| match event {
1600 Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
1601 Event::ChannelReady { .. } => {},
1602 Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
1605 let persister = Arc::new(Persister::new(data_dir));
1606 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1608 // Force close the channel and check that the SpendableOutputs event was handled.
1609 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1610 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
1611 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
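	// After BREAKDOWN_TIMEOUT confirmations the force-closing node's delayed output matures, so the
	// ChannelMonitor should surface a SpendableOutputs event for the handler registered above.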
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_scorer_persistence() {
1628 let (_, nodes) = create_nodes(2, "test_scorer_persistence");
1629 let data_dir = nodes[0].kv_store.get_data_dir();
1630 let persister = Arc::new(Persister::new(data_dir));
1631 let event_handler = |_: _| {};
1632 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Decaying and persisting scorer".to_string();
		if log_entries.get(&("lightning_background_processor", expected_log)).is_some() { break }
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
1647 macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
1648 ($nodes: expr, $receive: expr, $sleep: expr) => {
1649 let features = ChannelFeatures::empty();
1650 $nodes[0].network_graph.add_channel_from_partial_announcement(
1651 42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
1652 ).expect("Failed to update channel from partial announcement");
1653 let original_graph_description = $nodes[0].network_graph.to_string();
1654 assert!(original_graph_description.contains("42: features: 0000, node_one:"));
1655 assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
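		// The stub channel above has no further gossip backing it, so it is a candidate for
		// stale-channel pruning; the test asserts that pruning is deferred until the rapid gossip
		// sync below completes.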
		loop {
			$sleep;
			let log_entries = $nodes[0].logger.lines.lock().unwrap();
			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
			// Wait until the loop has gone around at least twice.
			if *log_entries.get(&("lightning_background_processor", loop_counter)).unwrap_or(&0) > 1 { break }
		}
1669 let initialization_input = vec![
1670 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
1671 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
1672 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
1673 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
1674 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
1675 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
1676 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
1677 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
1678 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
1679 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
1680 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
1681 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
		];
1684 $nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
1686 // this should have added two channels and pruned the previous one.
1687 assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);
1689 $receive.expect("Network graph not pruned within deadline");
1691 // all channels should now be pruned
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
	}
}
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
1698 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1700 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
1701 let data_dir = nodes[0].kv_store.get_data_dir();
1702 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1704 let event_handler = |_: _| {};
1705 let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1707 do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
1708 receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
1709 std::thread::sleep(Duration::from_millis(1)));
	background_processor.stop().unwrap();
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
1717 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1719 let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
1720 let data_dir = nodes[0].kv_store.get_data_dir();
1721 let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
1723 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1724 let bp_future = super::process_events_async(
1725 persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1726 nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1727 Some(nodes[0].scorer.clone()), move |dur: Duration| {
1728 let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::from_secs(1696300000)),
	);
1738 let t1 = tokio::spawn(bp_future);
1739 let t2 = tokio::spawn(async move {
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
			// Poll until the persister signals that the network graph was persisted after the sync.
			loop {
				tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
				if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
			}
		}, tokio::time::sleep(Duration::from_millis(1)).await);
		exit_sender.send(()).unwrap();
	});
1751 let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap();
}
1756 macro_rules! do_test_payment_path_scoring {
1757 ($nodes: expr, $receive: expr) => {
1758 // Ensure that we update the scorer when relevant events are processed. In this case, we ensure
1759 // that we update the scorer upon a payment path succeeding (note that the channel must be
1760 // public or else we won't score it).
1761 // A background event handler for FundingGenerationReady events must be hooked up to a
1762 // running background processor.
1763 let scored_scid = 4242;
1764 let secp_ctx = Secp256k1::new();
1765 let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
1766 let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);
		let path = Path { hops: vec![RouteHop {
			pubkey: node_1_id,
			node_features: NodeFeatures::empty(),
			short_channel_id: scored_scid,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 0,
			cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			maybe_announced_channel: true,
		}], blinded_tail: None };
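		// A single-hop path over `scored_scid`: every event pushed below references this path, so the
		// test scorer can assert that it sees the expected update for each handled event.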
1778 $nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: false,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
		// Ensure we'll score payments that were explicitly failed back by the destination as
		// a probe success, since the payment made it all the way to the destination.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: true,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: None,
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
1810 $nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: None,
			path: path.clone(),
		});
		let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
		match event {
			Event::PaymentPathSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}
1822 $nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
		});
		let event = $receive.expect("ProbeSuccessful not handled within deadline");
		match event {
			Event::ProbeSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}
1834 $nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeFailed {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("ProbeFailure not handled within deadline");
		match event {
			Event::ProbeFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
}
#[test]
fn test_payment_path_scoring() {
1851 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
1852 let event_handler = move |event: Event| match event {
1853 Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
1854 Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
1855 Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
1856 Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};
1860 let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
1861 let data_dir = nodes[0].kv_store.get_data_dir();
1862 let persister = Arc::new(Persister::new(data_dir));
1863 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1865 do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
1871 let log_entries = nodes[0].logger.lines.lock().unwrap();
1872 let expected_log = "Persisting scorer after update".to_string();
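	// One scorer persist per handled event: two payment path failures, one payment path success,
	// one probe success, and one probe failure.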
	assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_payment_path_scoring_async() {
1879 let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
	let event_handler = move |event: Event| {
		let sender_ref = sender.clone();
		async move {
			match event {
				Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
				Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
				_ => panic!("Unexpected event: {:?}", event),
			}
		}
	};
1893 let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
1894 let data_dir = nodes[0].kv_store.get_data_dir();
1895 let persister = Arc::new(Persister::new(data_dir));
1897 let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
1899 let bp_future = super::process_events_async(
1900 persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
1901 nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
1902 Some(nodes[0].scorer.clone()), move |dur: Duration| {
1903 let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::ZERO),
	);
1912 let t1 = tokio::spawn(bp_future);
1913 let t2 = tokio::spawn(async move {
1914 do_test_payment_path_scoring!(nodes, receiver.recv().await);
1915 exit_sender.send(()).unwrap();
1917 let log_entries = nodes[0].logger.lines.lock().unwrap();
1918 let expected_log = "Persisting scorer after update".to_string();
		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
	});
1922 let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap();
}