//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
//! running properly, and (2) either can or should be run in the background. See docs for
//! [`BackgroundProcessor`] for more details on the nitty-gritty.

#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]

#![deny(missing_docs)]
#![cfg_attr(not(feature = "futures"), deny(unsafe_code))]

#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]

#[cfg(any(test, feature = "std"))]
extern crate core;

#[cfg(not(feature = "std"))]
extern crate alloc;

#[macro_use] extern crate lightning;
extern crate lightning_rapid_gossip_sync;

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
use lightning::events::{Event, PathFailure};
#[cfg(feature = "std")]
use lightning::events::EventHandler;
#[cfg(any(feature = "std", feature = "futures"))]
use lightning::events::EventsProvider;

use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::OnionMessageHandler;
use lightning::ln::peer_handler::APeerManager;
use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
use lightning::routing::utxo::UtxoLookup;
use lightning::routing::router::Router;
use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;
#[cfg(feature = "std")]
use lightning::util::wakers::Sleeper;
use lightning_rapid_gossip_sync::RapidGossipSync;

use core::ops::Deref;
use core::time::Duration;
#[cfg(feature = "std")]
use std::sync::Arc;
#[cfg(feature = "std")]
use core::sync::atomic::{AtomicBool, Ordering};
#[cfg(feature = "std")]
use std::thread::{self, JoinHandle};
#[cfg(feature = "std")]
use std::time::Instant;

#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
///   and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
///   [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be
/// relied upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::events::Event
/// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
#[cfg(feature = "std")]
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
	stop_thread: Arc<AtomicBool>,
	thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}

#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this, but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

#[cfg(not(test))]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
#[cfg(test)]
const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 60 * 5;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

#[cfg(not(test))]
const REBROADCAST_TIMER: u64 = 30;
#[cfg(test)]
const REBROADCAST_TIMER: u64 = 1;
#[cfg(feature = "futures")]
/// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement.
const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
#[cfg(feature = "futures")]
const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
	min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
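// Added note: with the release-profile values above, `FASTEST_TIMER` works out to `PING_TIMER`
// (10 seconds). The compile-time check below is an added sanity sketch (not in the original
// flow) pinning down that the nested `min_u64` reduction never exceeds any of its inputs.
#[cfg(feature = "futures")]
const _: () = assert!(
	FASTEST_TIMER <= FRESHNESS_TIMER && FASTEST_TIMER <= PING_TIMER
		&& FASTEST_TIMER <= SCORER_PERSIST_TIMER && FASTEST_TIMER <= FIRST_NETWORK_PRUNE_TIMER
		&& FASTEST_TIMER <= REBROADCAST_TIMER
);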
/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
pub enum GossipSync<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
>
where U::Target: UtxoLookup, L::Target: Logger {
	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
	P2P(P),
	/// Rapid gossip sync from a trusted server.
	Rapid(R),
	/// No gossip sync.
	None,
}

impl<
	P: Deref<Target = P2PGossipSync<G, U, L>>,
	R: Deref<Target = RapidGossipSync<G, L>>,
	G: Deref<Target = NetworkGraph<L>>,
	U: Deref,
	L: Deref,
> GossipSync<P, R, G, U, L>
where U::Target: UtxoLookup, L::Target: Logger {
	fn network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::None => None,
		}
	}

	fn prunable_network_graph(&self) -> Option<&G> {
		match self {
			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
			GossipSync::Rapid(gossip_sync) => {
				if gossip_sync.is_initial_sync_complete() {
					Some(gossip_sync.network_graph())
				} else {
					None
				}
			},
			GossipSync::None => None,
		}
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
	GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::P2P`] variant.
	pub fn p2p(gossip_sync: P) -> Self {
		GossipSync::P2P(gossip_sync)
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
	GossipSync<
		&P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
		R,
		G,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::Rapid`] variant.
	pub fn rapid(gossip_sync: R) -> Self {
		GossipSync::Rapid(gossip_sync)
	}
}
/// This is not exported to bindings users as the bindings concretize everything and have constructors for us
impl<'a, L: Deref>
	GossipSync<
		&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
		&RapidGossipSync<&'a NetworkGraph<L>, L>,
		&'a NetworkGraph<L>,
		&'a (dyn UtxoLookup + Send + Sync),
		L,
	>
where
	L::Target: Logger,
{
	/// Initializes a new [`GossipSync::None`] variant.
	pub fn none() -> Self {
		GossipSync::None
	}
}
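// For callers, choosing a gossip strategy then comes down to which constructor is invoked.
// A sketch (the `p2p_sync` / `rapid_sync` handles are assumed to be built by the caller):
//
//   let gossip = GossipSync::p2p(p2p_sync);     // BOLT 7 gossip via connected peers
//   let gossip = GossipSync::rapid(rapid_sync); // snapshots from a trusted RGS server
//   let gossip = GossipSync::none();            // no gossip handling at all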
fn handle_network_graph_update<L: Deref>(
	network_graph: &NetworkGraph<L>, event: &Event
) where L::Target: Logger {
	if let Event::PaymentPathFailed {
		failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
	{
		network_graph.handle_network_update(upd);
	}
}
/// Updates the scorer based on the given event and returns whether an update occurred, so we can
/// decide whether to persist the scorer afterwards.
fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
	scorer: &'a S, event: &Event, duration_since_epoch: Duration,
) -> bool {
	match event {
		Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_failed(path, *scid, duration_since_epoch);
		},
		Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
			// Reached if the destination explicitly failed it back. We treat this as a successful probe
			// because the payment made it all the way to the destination with sufficient liquidity.
			let mut score = scorer.write_lock();
			score.probe_successful(path, duration_since_epoch);
		},
		Event::PaymentPathSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.payment_path_successful(path, duration_since_epoch);
		},
		Event::ProbeSuccessful { path, .. } => {
			let mut score = scorer.write_lock();
			score.probe_successful(path, duration_since_epoch);
		},
		Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
			let mut score = scorer.write_lock();
			score.probe_failed(path, *scid, duration_since_epoch);
		},
		_ => return false,
	}
	true
}
macro_rules! define_run_body {
	(
		$persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
		$channel_manager: ident, $process_channel_manager_events: expr,
		$peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
		$logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
		$timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
	) => { {
		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
		$channel_manager.timer_tick_occurred();
		log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
		$chain_monitor.rebroadcast_pending_claims();

		let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
		let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
		let mut last_ping_call = $get_timer(PING_TIMER);
		let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
		let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
		let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
		let mut have_pruned = false;
		let mut have_decayed_scorer = false;

		loop {
			$process_channel_manager_events;
			$process_chain_monitor_events;
			$process_onion_message_handler_events;

			// Note that the PeerManager::process_events may block on ChannelManager's locks,
			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
			// we want to ensure we get into `persist_manager` as quickly as we can, especially
			// without running the normal event processing above and handing events to users.
			//
			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
			// processing a message effectively at any point during this loop. In order to
			// minimize the time between such processing completing and persisting the updated
			// ChannelManager, we want to minimize methods blocking on a ChannelManager
			// generally, and as a fallback place such blocking only immediately before
			// persisting.
			$peer_manager.as_ref().process_events();

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}
			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
			// see `await_start`'s use below.
			let mut await_start = None;
			if $check_slow_await { await_start = Some($get_timer(1)); }
			$await;
			let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };

			// Exit the loop if the background processor was requested to stop.
			if $loop_exit_check {
				log_trace!($logger, "Terminating background processor.");
				break;
			}

			if $channel_manager.get_and_clear_needs_persistence() {
				log_trace!($logger, "Persisting ChannelManager...");
				$persister.persist_manager(&*$channel_manager)?;
				log_trace!($logger, "Done persisting ChannelManager.");
			}

			if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
				$channel_manager.timer_tick_occurred();
				last_freshness_call = $get_timer(FRESHNESS_TIMER);
			}

			if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
				log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
				$peer_manager.onion_message_handler().timer_tick_occurred();
				last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
			}
			// On various platforms, we may be starved of CPU cycles for several reasons.
			// E.g. on iOS, if we've been in the background, we will be entirely paused.
			// Similarly, if we're on a desktop platform and the device has been asleep, we
			// may not get any cycles.
			// We detect this by checking if our max-100ms-sleep, above, ran longer than a
			// full second, at which point we assume sockets may have been killed (they
			// appear to be at least on some platforms, even if it has only been a second).
			// Note that we have to take care to not get here just because user event
			// processing was slow at the top of the loop. For example, the sample client
			// may call Bitcoin Core RPCs during event handling, which very often takes
			// more than a handful of seconds to complete, and shouldn't disconnect all our
			// peers.
			if await_slow {
				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
				$peer_manager.as_ref().disconnect_all_peers();
				last_ping_call = $get_timer(PING_TIMER);
			} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
				$peer_manager.as_ref().timer_tick_occurred();
				last_ping_call = $get_timer(PING_TIMER);
			}
			// Note that we want to run a graph prune once not long after startup before
			// falling back to our usual hourly prunes. This avoids short-lived clients never
			// pruning their network graph. We run once 60 seconds after startup before
			// continuing our normal cadence. For RGS, since 60 seconds is likely too long,
			// we prune after an initial sync completes.
			let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
			let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
			let should_prune = match $gossip_sync {
				GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
				_ => prune_timer_elapsed,
			};
			if should_prune {
				// The network graph must not be pruned while rapid sync completion is pending
				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
					if let Some(duration_since_epoch) = $time_fetch() {
						log_trace!($logger, "Pruning and persisting network graph.");
						network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
					} else {
						log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
						log_trace!($logger, "Persisting network graph.");
					}

					if let Err(e) = $persister.persist_graph(network_graph) {
						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
					}

					have_pruned = true;
				}
				let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
				last_prune_call = $get_timer(prune_timer);
			}
			if !have_decayed_scorer {
				if let Some(ref scorer) = $scorer {
					if let Some(duration_since_epoch) = $time_fetch() {
						log_trace!($logger, "Calling time_passed on scorer at startup");
						scorer.write_lock().time_passed(duration_since_epoch);
					}
				}
				have_decayed_scorer = true;
			}

			if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
				if let Some(ref scorer) = $scorer {
					if let Some(duration_since_epoch) = $time_fetch() {
						log_trace!($logger, "Calling time_passed and persisting scorer");
						scorer.write_lock().time_passed(duration_since_epoch);
					} else {
						log_trace!($logger, "Persisting scorer");
					}
					if let Err(e) = $persister.persist_scorer(&scorer) {
						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
					}
				}
				last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
			}
			if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
				log_trace!($logger, "Rebroadcasting monitor's pending claims");
				$chain_monitor.rebroadcast_pending_claims();
				last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
			}
		}

		// After we exit, ensure we persist the ChannelManager one final time - this avoids
		// some races where users quit while channel updates were in-flight, with
		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
		$persister.persist_manager(&*$channel_manager)?;

		// Persist Scorer on exit
		if let Some(ref scorer) = $scorer {
			$persister.persist_scorer(&scorer)?;
		}

		// Persist NetworkGraph on exit
		if let Some(network_graph) = $gossip_sync.network_graph() {
			$persister.persist_graph(network_graph)?;
		}

		Ok(())
	} }
}
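// The macro above is instantiated twice below: once by `process_events_async` with async sleeps
// and future-polling timers, and once by `BackgroundProcessor::start` with blocking waits and
// `std::time::Instant`-based timers.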
#[cfg(feature = "futures")]
pub(crate) mod futures_util {
	use core::future::Future;
	use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
	use core::pin::Pin;
	use core::marker::Unpin;
	pub(crate) struct Selector<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> {
		pub a: A,
		pub b: B,
		pub c: C,
	}
	pub(crate) enum SelectorOutput {
		A, B, C(bool),
	}

	impl<
		A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
	> Future for Selector<A, B, C> {
		type Output = SelectorOutput;
		fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
			match Pin::new(&mut self.a).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.b).poll(ctx) {
				Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
				Poll::Pending => {},
			}
			match Pin::new(&mut self.c).poll(ctx) {
				Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
				Poll::Pending => {},
			}
			Poll::Pending
		}
	}

	// If we want to poll a future without an async context to figure out if it has completed or
	// not without awaiting, we need a Waker, which needs a vtable... we fill it with dummy values
	// but sadly there's a good bit of boilerplate here.
	fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
	fn dummy_waker_action(_: *const ()) { }

	const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
		dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
	pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
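
	// An illustrative test (an added sketch, not part of the original module): the dummy waker
	// lets us poll a future exactly once, outside any executor, to learn whether it completed.
	#[cfg(test)]
	mod dummy_waker_example {
		use super::*;
		use core::pin::Pin;

		#[test]
		fn poll_once_with_dummy_waker() {
			// An immediately-ready future resolves on the first poll even though the waker we
			// pass can never actually wake anything.
			let mut fut = Box::pin(async { 42u32 });
			let waker = dummy_waker();
			let mut ctx = core::task::Context::from_waker(&waker);
			match Pin::new(&mut fut).poll(&mut ctx) {
				Poll::Ready(v) => assert_eq!(v, 42),
				Poll::Pending => panic!("a trivial future should be ready on the first poll"),
			}
		}
	}
}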
#[cfg(feature = "futures")]
use futures_util::{Selector, SelectorOutput, dummy_waker};
#[cfg(feature = "futures")]
use core::task;
/// Processes background events in a future.
///
/// `sleeper` should return a future which completes in the given amount of time and returns a
/// boolean indicating whether the background processing should exit. Once `sleeper` returns a
/// future which outputs `true`, the loop will exit and this function's future will complete.
/// The `sleeper` future is free to return early after it has triggered the exit condition.
///
/// See [`BackgroundProcessor::start`] for information on which actions this handles.
///
/// Requires the `futures` feature. Note that while this method is available without the `std`
/// feature, using it without `std` will skip calling
/// [`NetworkGraph::remove_stale_channels_and_tracking`]; in that case you should instead call
/// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] regularly yourself.
///
/// The `mobile_interruptable_platform` flag should be set if we're currently running on a
/// mobile device, where we may need to check for interruption of the application regularly. If
/// you are unsure, you should set the flag, as its performance impact is minimal unless there
/// are hundreds or thousands of simultaneous process calls running.
///
/// The `fetch_time` parameter should return the current wall clock time, if one is available.
/// If no time is available, some features may be disabled; however, the node will still operate
/// fine.
///
/// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
/// could set up `process_events_async` like this:
/// ```
/// # use lightning::io;
/// # use std::sync::{Arc, RwLock};
/// # use std::sync::atomic::{AtomicBool, Ordering};
/// # use std::time::SystemTime;
/// # use lightning_background_processor::{process_events_async, GossipSync};
/// # struct MyStore {}
/// # impl lightning::util::persist::KVStore for MyStore {
/// #     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
/// #     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
/// #     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
/// #     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
/// # }
/// # struct MyEventHandler {}
/// # impl MyEventHandler {
/// #     async fn handle_event(&self, _: lightning::events::Event) {}
/// # }
/// # #[derive(Eq, PartialEq, Clone, Hash)]
/// # struct MySocketDescriptor {}
/// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
/// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
/// #     fn disconnect_socket(&mut self) {}
/// # }
/// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
/// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
/// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
/// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
/// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
/// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
/// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
/// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
/// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
/// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
/// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
///
/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
/// let background_persister = Arc::clone(&my_persister);
/// let background_event_handler = Arc::clone(&my_event_handler);
/// let background_chain_mon = Arc::clone(&my_chain_monitor);
/// let background_chan_man = Arc::clone(&my_channel_manager);
/// let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
/// let background_peer_man = Arc::clone(&my_peer_manager);
/// let background_logger = Arc::clone(&my_logger);
/// let background_scorer = Arc::clone(&my_scorer);
///
/// // Set up the sleeper.
/// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
///
/// let sleeper = move |d| {
///     let mut receiver = stop_receiver.clone();
///     Box::pin(async move {
///         tokio::select!{
///             _ = tokio::time::sleep(d) => false,
///             _ = receiver.changed() => true,
///         }
///     })
/// };
///
/// let mobile_interruptable_platform = false;
///
/// let handle = tokio::spawn(async move {
///     process_events_async(
///         background_persister,
///         |e| background_event_handler.handle_event(e),
///         background_chain_mon,
///         background_chan_man,
///         background_gossip_sync,
///         background_peer_man,
///         background_logger,
///         Some(background_scorer),
///         sleeper,
///         mobile_interruptable_platform,
///         || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
///     )
///     .await
///     .expect("Failed to process events");
/// });
///
/// // Stop the background processing.
/// stop_sender.send(()).unwrap();
/// handle.await.unwrap();
/// # }
/// ```
#[cfg(feature = "futures")]
pub async fn process_events_async<
	'a,
	UL: 'static + Deref + Send + Sync,
	CF: 'static + Deref + Send + Sync,
	CW: 'static + Deref + Send + Sync,
	T: 'static + Deref + Send + Sync,
	ES: 'static + Deref + Send + Sync,
	NS: 'static + Deref + Send + Sync,
	SP: 'static + Deref + Send + Sync,
	F: 'static + Deref + Send + Sync,
	R: 'static + Deref + Send + Sync,
	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
	L: 'static + Deref + Send + Sync,
	P: 'static + Deref + Send + Sync,
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PS: 'static + Deref + Send,
	M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
	CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
	PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
	PM: 'static + Deref + Send + Sync,
	S: 'static + Deref<Target = SC> + Send + Sync,
	SC: for<'b> WriteableScore<'b>,
	SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
	Sleeper: Fn(Duration) -> SleepFuture,
	FetchTime: Fn() -> Option<Duration>,
>(
	persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
	gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
) -> Result<(), lightning::io::Error>
where
	UL::Target: 'static + UtxoLookup,
	CF::Target: 'static + chain::Filter,
	CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: 'static + BroadcasterInterface,
	ES::Target: 'static + EntropySource,
	NS::Target: 'static + NodeSigner,
	SP::Target: 'static + SignerProvider,
	F::Target: 'static + FeeEstimator,
	R::Target: 'static + Router,
	L::Target: 'static + Logger,
	P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
	PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
	PM::Target: APeerManager + Send + Sync,
{
	let mut should_break = false;
	let async_event_handler = |event| {
		let network_graph = gossip_sync.network_graph();
		let event_handler = &event_handler;
		let scorer = &scorer;
		let logger = &logger;
		let persister = &persister;
		let fetch_time = &fetch_time;
		async move {
			if let Some(network_graph) = network_graph {
				handle_network_graph_update(network_graph, &event)
			}
			if let Some(ref scorer) = scorer {
				if let Some(duration_since_epoch) = fetch_time() {
					if update_scorer(scorer, &event, duration_since_epoch) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
			}
			event_handler(event).await;
		}
	};
	define_run_body!(
		persister, chain_monitor,
		chain_monitor.process_pending_events_async(async_event_handler).await,
		channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
		peer_manager, process_onion_message_handler_events_async(&peer_manager, async_event_handler).await,
		gossip_sync, logger, scorer, should_break, {
			let fut = Selector {
				a: channel_manager.get_event_or_persistence_needed_future(),
				b: chain_monitor.get_update_future(),
				c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
			};
			match fut.await {
				SelectorOutput::A|SelectorOutput::B => {},
				SelectorOutput::C(exit) => {
					should_break = exit;
				}
			}
		}, |t| sleeper(Duration::from_secs(t)),
		|fut: &mut SleepFuture, _| {
			let mut waker = dummy_waker();
			let mut ctx = task::Context::from_waker(&mut waker);
			match core::pin::Pin::new(fut).poll(&mut ctx) {
				task::Poll::Ready(exit) => { should_break = exit; true },
				task::Poll::Pending => false,
			}
		}, mobile_interruptable_platform, fetch_time,
	)
}
#[cfg(feature = "futures")]
async fn process_onion_message_handler_events_async<
	EventHandlerFuture: core::future::Future<Output = ()>,
	EventHandler: Fn(Event) -> EventHandlerFuture,
	PM: 'static + Deref + Send + Sync,
>(
	peer_manager: &PM, handler: EventHandler
)
where
	PM::Target: APeerManager + Send + Sync,
{
	let events = core::cell::RefCell::new(Vec::new());
	peer_manager.onion_message_handler().process_pending_events(&|e| events.borrow_mut().push(e));

	for event in events.into_inner() {
		handler(event).await;
	}
}
#[cfg(feature = "std")]
impl BackgroundProcessor {
	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
	/// documentation].
	///
	/// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
	/// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
	/// either [`join`] or [`stop`].
	///
	/// # Data Persistence
	///
	/// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
	/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
	/// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
	/// provided implementation.
	///
	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
	/// See the `lightning-persister` crate for LDK's provided implementation.
	///
	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
	/// error or call [`join`] and handle any error that may arise. For the latter case,
	/// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
	///
	/// # Event Handling
	///
	/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
	/// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
	/// functionality implemented by other handlers.
	/// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
	///
	/// # Rapid Gossip Sync
	///
	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
	/// until the [`RapidGossipSync`] instance completes its first sync.
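	///
	/// # Example
	///
	/// A minimal sketch of starting and stopping the processor (not compiled here; the `my_*`
	/// values stand in for your node's concrete, already-constructed types):
	///
	/// ```ignore
	/// let bg_processor = BackgroundProcessor::start(
	///     my_persister, my_event_handler, my_chain_monitor, my_channel_manager,
	///     my_gossip_sync, my_peer_manager, my_logger, Some(my_scorer),
	/// );
	/// // ... run the node ...
	/// // Stopping returns any error encountered while persisting on shutdown.
	/// bg_processor.stop().expect("failed to persist on shutdown");
	/// ```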
	///
	/// [top-level documentation]: BackgroundProcessor
	/// [`join`]: Self::join
	/// [`stop`]: Self::stop
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
	pub fn start<
		'a,
		UL: 'static + Deref + Send + Sync,
		CF: 'static + Deref + Send + Sync,
		CW: 'static + Deref + Send + Sync,
		T: 'static + Deref + Send + Sync,
		ES: 'static + Deref + Send + Sync,
		NS: 'static + Deref + Send + Sync,
		SP: 'static + Deref + Send + Sync,
		F: 'static + Deref + Send + Sync,
		R: 'static + Deref + Send + Sync,
		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
		L: 'static + Deref + Send + Sync,
		P: 'static + Deref + Send + Sync,
		EH: 'static + EventHandler + Send,
		PS: 'static + Deref + Send,
		M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
		CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
		PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
		PM: 'static + Deref + Send + Sync,
		S: 'static + Deref<Target = SC> + Send + Sync,
		SC: for <'b> WriteableScore<'b>,
	>(
		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
		gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
	) -> Self
	where
		UL::Target: 'static + UtxoLookup,
		CF::Target: 'static + chain::Filter,
		CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
		P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
		PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
		PM::Target: APeerManager + Send + Sync,
	{
		let stop_thread = Arc::new(AtomicBool::new(false));
		let stop_thread_clone = stop_thread.clone();
		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
			let event_handler = |event| {
				let network_graph = gossip_sync.network_graph();
				if let Some(network_graph) = network_graph {
					handle_network_graph_update(network_graph, &event)
				}
				if let Some(ref scorer) = scorer {
					use std::time::SystemTime;
					let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
						.expect("Time should be sometime after 1970");
					if update_scorer(scorer, &event, duration_since_epoch) {
						log_trace!(logger, "Persisting scorer after update");
						if let Err(e) = persister.persist_scorer(&scorer) {
							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
						}
					}
				}
				event_handler.handle_event(event);
			};
			define_run_body!(
				persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
				channel_manager, channel_manager.process_pending_events(&event_handler),
				peer_manager,
				peer_manager.onion_message_handler().process_pending_events(&event_handler),
				gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
				{ Sleeper::from_two_futures(
					channel_manager.get_event_or_persistence_needed_future(),
					chain_monitor.get_update_future()
				).wait_timeout(Duration::from_millis(100)); },
				|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
				|| {
					use std::time::SystemTime;
					Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
						.expect("Time should be sometime after 1970"))
				},
			)
		});
		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
	}
	/// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked, such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn join(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.join_thread()
	}
	/// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
	/// [`ChannelManager`].
	///
	/// # Panics
	///
	/// This function panics if the background thread has panicked, such as while persisting or
	/// handling events.
	///
	/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
	pub fn stop(mut self) -> Result<(), std::io::Error> {
		assert!(self.thread_handle.is_some());
		self.stop_and_join_thread()
	}
	fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
		self.stop_thread.store(true, Ordering::Release);
		self.join_thread()
	}

	fn join_thread(&mut self) -> Result<(), std::io::Error> {
		match self.thread_handle.take() {
			Some(handle) => handle.join().unwrap(),
			None => Ok(()),
		}
	}
}

#[cfg(feature = "std")]
impl Drop for BackgroundProcessor {
	fn drop(&mut self) {
		self.stop_and_join_thread().unwrap();
	}
}
#[cfg(all(feature = "std", test))]
mod tests {
	use bitcoin::blockdata::constants::{genesis_block, ChainHash};
	use bitcoin::blockdata::locktime::absolute::LockTime;
	use bitcoin::blockdata::transaction::{Transaction, TxOut};
	use bitcoin::network::constants::Network;
	use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
	use lightning::chain::{BestBlock, Confirm, chainmonitor};
	use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
	use lightning::sign::{InMemorySigner, KeysManager};
	use lightning::chain::transaction::OutPoint;
	use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
	use lightning::{get_event_msg, get_event};
	use lightning::ln::PaymentHash;
	use lightning::ln::channelmanager;
	use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
	use lightning::ln::features::{ChannelFeatures, NodeFeatures};
	use lightning::ln::functional_test_utils::*;
	use lightning::ln::msgs::{ChannelMessageHandler, Init};
	use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
	use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
	use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
	use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
	use lightning::util::config::UserConfig;
	use lightning::util::ser::Writeable;
	use lightning::util::test_utils;
	use lightning::util::persist::{KVStore,
		CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
		NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
		SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
	use lightning_persister::fs_store::FilesystemStore;
	use std::collections::VecDeque;
	use std::{fs, env};
	use std::path::PathBuf;
	use std::sync::{Arc, Mutex};
	use std::sync::mpsc::SyncSender;
	use std::time::Duration;
	use lightning_rapid_gossip_sync::RapidGossipSync;
	use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;

	#[derive(Clone, Hash, PartialEq, Eq)]
	struct TestDescriptor{}
	impl SocketDescriptor for TestDescriptor {
		fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
			0
		}

		fn disconnect_socket(&mut self) {}
	}

	#[cfg(c_bindings)]
	type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
	#[cfg(not(c_bindings))]
	type LockingWrapper<T> = Mutex<T>;
	type ChannelManager =
		channelmanager::ChannelManager<
			Arc<ChainMonitor>,
			Arc<test_utils::TestBroadcaster>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<KeysManager>,
			Arc<test_utils::TestFeeEstimator>,
			Arc<DefaultRouter<
				Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
				Arc<test_utils::TestLogger>,
				Arc<KeysManager>,
				Arc<LockingWrapper<TestScorer>>,
				(),
				TestScorer>
			>,
			Arc<test_utils::TestLogger>>;

	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;

	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
	struct Node {
		node: Arc<ChannelManager>,
		p2p_gossip_sync: PGS,
		rapid_gossip_sync: RGS,
		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
		chain_monitor: Arc<ChainMonitor>,
		kv_store: Arc<FilesystemStore>,
		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
		logger: Arc<test_utils::TestLogger>,
		best_block: BestBlock,
		scorer: Arc<LockingWrapper<TestScorer>>,
	}

	impl Node {
		fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::P2P(self.p2p_gossip_sync.clone())
		}

		fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::Rapid(self.rapid_gossip_sync.clone())
		}

		fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
			GossipSync::None
		}
	}
	impl Drop for Node {
		fn drop(&mut self) {
			let data_dir = self.kv_store.get_data_dir();
			match fs::remove_dir_all(data_dir.clone()) {
				Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
				_ => {}
			}
		}
	}

	struct Persister {
		graph_error: Option<(std::io::ErrorKind, &'static str)>,
		graph_persistence_notifier: Option<SyncSender<()>>,
		manager_error: Option<(std::io::ErrorKind, &'static str)>,
		scorer_error: Option<(std::io::ErrorKind, &'static str)>,
		kv_store: FilesystemStore,
	}
	impl Persister {
		fn new(data_dir: PathBuf) -> Self {
			let kv_store = FilesystemStore::new(data_dir);
			Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
		}

		fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { graph_error: Some((error, message)), ..self }
		}

		fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
			Self { graph_persistence_notifier: Some(sender), ..self }
		}

		fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { manager_error: Some((error, message)), ..self }
		}

		fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
			Self { scorer_error: Some((error, message)), ..self }
		}
	}
	impl KVStore for Persister {
		fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
			self.kv_store.read(primary_namespace, secondary_namespace, key)
		}

		fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
			if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == CHANNEL_MANAGER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.manager_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == NETWORK_GRAPH_PERSISTENCE_KEY
			{
				if let Some(sender) = &self.graph_persistence_notifier {
					match sender.send(()) {
						Ok(()) => {},
						Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
					}
				};

				if let Some((error, message)) = self.graph_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
				secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
				key == SCORER_PERSISTENCE_KEY
			{
				if let Some((error, message)) = self.scorer_error {
					return Err(std::io::Error::new(error, message))
				}
			}

			self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
		}

		fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
			self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
		}

		fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
			self.kv_store.list(primary_namespace, secondary_namespace)
		}
	}
	struct TestScorer {
		event_expectations: Option<VecDeque<TestResult>>,
	}

	#[derive(Debug)]
	enum TestResult {
		PaymentFailure { path: Path, short_channel_id: u64 },
		PaymentSuccess { path: Path },
		ProbeFailure { path: Path },
		ProbeSuccess { path: Path },
	}

	impl TestScorer {
		fn new() -> Self {
			Self { event_expectations: None }
		}

		fn expect(&mut self, expectation: TestResult) {
			self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
		}
	}

	impl lightning::util::ser::Writeable for TestScorer {
		fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
	}
	impl ScoreLookUp for TestScorer {
		type ScoreParams = ();
		fn channel_penalty_msat(
			&self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &Self::ScoreParams
		) -> u64 { unimplemented!(); }
	}
	impl ScoreUpdate for TestScorer {
		fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, short_channel_id } => {
						assert_eq!(actual_path, &path);
						assert_eq!(actual_short_channel_id, short_channel_id);
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected successful payment path: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						assert_eq!(actual_path, &path);
					},
					TestResult::ProbeSuccess { path } => {
						panic!("Unexpected probe success: {:?}", path)
					}
				}
			}
		}

		fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
			if let Some(expectations) = &mut self.event_expectations {
				match expectations.pop_front().unwrap() {
					TestResult::PaymentFailure { path, .. } => {
						panic!("Unexpected payment path failure: {:?}", path)
					},
					TestResult::PaymentSuccess { path } => {
						panic!("Unexpected payment path success: {:?}", path)
					},
					TestResult::ProbeFailure { path } => {
						panic!("Unexpected probe failure: {:?}", path)
					},
					TestResult::ProbeSuccess { path } => {
						assert_eq!(actual_path, &path);
					}
				}
			}
		}

		fn time_passed(&mut self, _: Duration) {}
	}

	#[cfg(c_bindings)]
	impl lightning::routing::scoring::Score for TestScorer {}

	impl Drop for TestScorer {
		fn drop(&mut self) {
			if std::thread::panicking() {
				return;
			}

			if let Some(event_expectations) = &self.event_expectations {
				if !event_expectations.is_empty() {
					panic!("Unsatisfied event expectations: {:?}", event_expectations);
				}
			}
		}
	}

	fn get_full_filepath(filepath: String, filename: String) -> String {
		let mut path = PathBuf::from(filepath);
		path.push(filename);
		path.to_str().unwrap().to_string()
	}
	fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
		let persist_temp_path = env::temp_dir().join(persist_dir);
		let persist_dir = persist_temp_path.to_string_lossy().to_string();
		let network = Network::Bitcoin;
		let mut nodes = Vec::new();
		for i in 0..num_nodes {
			let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
			let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
			let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
			let genesis_block = genesis_block(network);
			let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
			let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
			let now = Duration::from_secs(genesis_block.header.time as u64);
			let seed = [i as u8; 32];
			let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
			let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), Arc::clone(&keys_manager), scorer.clone(), Default::default()));
			let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
			let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
			let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
			let best_block = BestBlock::from_network(network);
			let params = ChainParameters { network, best_block };
			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
			let msg_handler = MessageHandler {
				chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
				route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
				onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
			};
			let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
			nodes.push(node);
		}

		for i in 0..num_nodes {
			for j in (i+1)..num_nodes {
				nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
					features: nodes[j].node.init_features(), networks: None, remote_network_address: None
				}, true).unwrap();
				nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
					features: nodes[i].node.init_features(), networks: None, remote_network_address: None
				}, false).unwrap();
			}
		}

		(persist_dir, nodes)
	}
	macro_rules! open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			begin_open_channel!($node_a, $node_b, $channel_value);
			let events = $node_a.node.get_and_clear_pending_events();
			assert_eq!(events.len(), 1);
			let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
			$node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
			$node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
			get_event!($node_b, Event::ChannelPending);
			$node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
			get_event!($node_a, Event::ChannelPending);
			tx
		}}
	}

	macro_rules! begin_open_channel {
		($node_a: expr, $node_b: expr, $channel_value: expr) => {{
			$node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
			$node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
			$node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
		}}
	}

	macro_rules! handle_funding_generation_ready {
		($event: expr, $channel_value: expr) => {{
			match $event {
				Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
					assert_eq!(channel_value_satoshis, $channel_value);
					assert_eq!(user_channel_id, 42);

					let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
						value: channel_value_satoshis, script_pubkey: output_script.clone(),
					}]};
					(temporary_channel_id, tx)
				},
				_ => panic!("Unexpected event"),
			}
		}}
	}
	fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
		for i in 1..=depth {
			let prev_blockhash = node.best_block.block_hash();
			let height = node.best_block.height() + 1;
			let header = create_dummy_header(prev_blockhash, height);
			let txdata = vec![(0, tx)];
			node.best_block = BestBlock::new(header.block_hash(), height);
			match i {
				1 => {
					node.node.transactions_confirmed(&header, &txdata, height);
					node.chain_monitor.transactions_confirmed(&header, &txdata, height);
				},
				x if x == depth => {
					node.node.best_block_updated(&header, height);
					node.chain_monitor.best_block_updated(&header, height);
				},
				_ => {},
			}
		}
	}

	fn confirm_transaction(node: &mut Node, tx: &Transaction) {
		confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
	}

	#[test]
	fn test_background_processor() {
		// Test that when a new channel is created, the ChannelManager needs to be re-persisted with
		// updates. Also test that when new updates are available, the manager signals that it needs
		// re-persistence and is successfully re-persisted.
		let (persist_dir, nodes) = create_nodes(2, "test_background_processor");

		// Go through the channel creation process so that each node has something to persist. Since
		// open_channel consumes events, it must complete before starting BackgroundProcessor to
		// avoid a race with processing events.
		let tx = open_channel!(nodes[0], nodes[1], 100000);

		// Initiate the background processors to watch each node.
		let data_dir = nodes[0].kv_store.get_data_dir();
		let persister = Arc::new(Persister::new(data_dir));
		let event_handler = |_: _| {};
		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
		macro_rules! check_persisted_data {
			($node: expr, $filepath: expr) => {
				let mut expected_bytes = Vec::new();
				loop {
					expected_bytes.clear();
					match $node.write(&mut expected_bytes) {
						Ok(()) => {
							match std::fs::read($filepath) {
								Ok(bytes) => {
									if bytes == expected_bytes {
										break
									} else {
										continue
									}
								},
								Err(_) => continue
							}
						},
						Err(e) => panic!("Unexpected error: {}", e)
					}
				}
			}
		}
1409 // Check that the initial channel manager data is persisted as expected.
1410 let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
1411 check_persisted_data!(nodes[0].node, filepath.clone());
1414 if !nodes[0].node.get_event_or_persist_condvar_value() { break }
1417 // Force-close the channel.
1418 nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
1420 // Check that the force-close updates are persisted.
1421 check_persisted_data!(nodes[0].node, filepath.clone());
1423 if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	// Check that the network graph is persisted.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
	check_persisted_data!(nodes[0].network_graph, filepath.clone());

	// Check that the scorer is persisted.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
	check_persisted_data!(nodes[0].scorer, filepath.clone());
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_timer_tick_called() {
	// Test that:
	// - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
	// - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
	// - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
	// - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
	let (_, nodes) = create_nodes(1, "test_timer_tick_called");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// Spin until every timer-driven log line has shown up at least once.
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
		let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
		let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
		let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
		if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
			break
		}
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_channel_manager_persist_error() {
	// Test that if we encounter an error during manager persistence, it gets returned when the
	// background processor's thread is joined.
	let (_, nodes) = create_nodes(2, "test_persist_error");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.join() {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_channel_manager_persist_error_async() {
	// Test that if we encounter an error during manager persistence, the future resolves with
	// an error.
	let (_, nodes) = create_nodes(2, "test_persist_error_sync");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			Box::pin(async move {
				tokio::time::sleep(dur).await;
				false // Never exit
			})
		}, false, || Some(Duration::ZERO),
	);
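	// The trailing `false` disables the mobile-interruptable sleeper behavior, and the final
	// closure supplies the current time used for scorer and network-graph timekeeping.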
	match bp_future.await {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_network_graph_persist_error() {
	// Test that if we encounter an error during network graph persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting network graph"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_scorer_persist_error() {
	// Test that if we encounter an error during scorer persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting scorer"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_background_event_handling() {
	let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
	let channel_value = 100000;
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir.clone()));

	// Set up a background event handler for FundingGenerationReady events.
	let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
	let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
		Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
		Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};

	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = funding_generation_recv
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
	get_event!(nodes[1], Event::ChannelPending);
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ChannelPending not handled within deadline");

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
		Event::ChannelReady { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
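	// Confirming the commitment transaction to a depth of BREAKDOWN_TIMEOUT matures our
	// to_self output past its contest delay, which is what produces the SpendableOutputs event.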
	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_scorer_persistence() {
	let (_, nodes) = create_nodes(2, "test_scorer_persistence");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Calling time_passed and persisting scorer".to_string();
		if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
			break
		}
	}
	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
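// Shared body for the sync and async variants below: seeds the graph with a partial channel
// announcement, waits for the processor's timer loop to run, applies a rapid gossip sync
// snapshot, and checks that stale channels are only pruned once graph sync has completed.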
macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
	($nodes: expr, $receive: expr, $sleep: expr) => {
		let features = ChannelFeatures::empty();
		$nodes[0].network_graph.add_channel_from_partial_announcement(
			42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
		).expect("Failed to update channel from partial announcement");
		let original_graph_description = $nodes[0].network_graph.to_string();
		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
		loop {
			$sleep;
			let log_entries = $nodes[0].logger.lines.lock().unwrap();
			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
			if *log_entries.get(&("lightning_background_processor", loop_counter))
				.unwrap_or(&0) > 1
			{
				// Wait until the loop has gone around at least twice.
				break
			}
		}
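		// Hard-coded rapid gossip sync snapshot adding two channel announcements; the byte
		// layout follows the lightning-rapid-gossip-sync serialization format.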
		let initialization_input = vec![
			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
		];
		$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
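		// Passing a fixed unix timestamp gives the no-std code path a deterministic notion of
		// "now" when judging the snapshot's freshness.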
		// This should have added two channels and pruned the previous one.
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

		$receive.expect("Network graph not pruned within deadline");

		// All channels should now be pruned.
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
	}
}
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let event_handler = |_: _| {};
	let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
		receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
		std::thread::sleep(Duration::from_millis(1)));

	background_processor.stop().unwrap();
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::from_secs(1696300000)),
	);
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
			let mut i = 0;
			loop {
				tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
				if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
				assert!(i < 5);
				i += 1;
			}
		}, tokio::time::sleep(Duration::from_millis(1)).await);
		exit_sender.send(()).unwrap();
	});

	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}
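// Shared body for the sync and async scoring tests below: pushes path-failure, path-success,
// and probe events through the event handler and asserts that the scorer records each one.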
macro_rules! do_test_payment_path_scoring {
	($nodes: expr, $receive: expr) => {
		// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
		// that we update the scorer upon a payment path succeeding (note that the channel must be
		// public or else we won't score it).
		// A background event handler for FundingGenerationReady events must be hooked up to a
		// running background processor.
		let scored_scid = 4242;
		let secp_ctx = Secp256k1::new();
		let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

		let path = Path { hops: vec![RouteHop {
			pubkey: node_1_id,
			node_features: NodeFeatures::empty(),
			short_channel_id: scored_scid,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 0,
			cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			maybe_announced_channel: true,
		}], blinded_tail: None };
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: false,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
		// Ensure we'll score payments that were explicitly failed back by the destination as
		// ProbeSuccess.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: true,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: None,
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: None,
			path: path.clone(),
		});
		let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
		match event {
			Event::PaymentPathSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
		});
		let event = $receive.expect("ProbeSuccessful not handled within deadline");
		match event {
			Event::ProbeSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeFailed {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("ProbeFailure not handled within deadline");
		match event {
			Event::ProbeFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
}
#[test]
fn test_payment_path_scoring() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
		Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
	let log_entries = nodes[0].logger.lines.lock().unwrap();
	let expected_log = "Persisting scorer after update".to_string();
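	// One scorer persist per event handled above: two path failures, one path success, one
	// probe success, and one probe failure.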
	assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_payment_path_scoring_async() {
	let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
	let event_handler = move |event: Event| {
		let sender_ref = sender.clone();
		async move {
			match event {
				Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
				Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
				_ => panic!("Unexpected event: {:?}", event),
			}
		}
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());

	let bp_future = super::process_events_async(
		persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::ZERO),
	);
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_payment_path_scoring!(nodes, receiver.recv().await);
		exit_sender.send(()).unwrap();

		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer after update".to_string();
		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
	});

	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}