1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
5 #![deny(rustdoc::broken_intra_doc_links)]
6 #![deny(rustdoc::private_intra_doc_links)]
9 #![cfg_attr(not(feature = "futures"), deny(unsafe_code))]
11 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
13 #![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
15 #[cfg(any(test, feature = "std"))]
18 #[cfg(not(feature = "std"))]
21 #[macro_use] extern crate lightning;
22 extern crate lightning_rapid_gossip_sync;
25 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
26 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
27 use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
28 use lightning::events::{Event, PathFailure};
29 #[cfg(feature = "std")]
30 use lightning::events::EventHandler;
31 #[cfg(any(feature = "std", feature = "futures"))]
32 use lightning::events::EventsProvider;
34 use lightning::ln::channelmanager::ChannelManager;
35 use lightning::ln::msgs::OnionMessageHandler;
36 use lightning::ln::peer_handler::APeerManager;
37 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
38 use lightning::routing::utxo::UtxoLookup;
39 use lightning::routing::router::Router;
40 use lightning::routing::scoring::{ScoreUpdate, WriteableScore};
41 use lightning::util::logger::Logger;
42 use lightning::util::persist::Persister;
43 #[cfg(feature = "std")]
44 use lightning::util::wakers::Sleeper;
45 use lightning_rapid_gossip_sync::RapidGossipSync;
48 use core::time::Duration;
50 #[cfg(feature = "std")]
52 #[cfg(feature = "std")]
53 use core::sync::atomic::{AtomicBool, Ordering};
54 #[cfg(feature = "std")]
55 use std::thread::{self, JoinHandle};
56 #[cfg(feature = "std")]
57 use std::time::Instant;
59 #[cfg(not(feature = "std"))]
62 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
63 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
64 /// responsibilities are:
65 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
66 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
67 /// writing it to disk/backups by invoking the callback given to it at startup.
68 /// [`ChannelManager`] persistence should be done in the background.
69 /// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`]
70 /// and [`PeerManager::timer_tick_occurred`] at the appropriate intervals.
71 /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a
72 /// [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]).
74 /// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be relied
75 /// upon, as doing so may result in high latency.
79 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
80 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
81 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
82 /// unilateral chain closure fees are at risk.
84 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
85 /// [`Event`]: lightning::events::Event
86 /// [`PeerManager::timer_tick_occurred`]: lightning::ln::peer_handler::PeerManager::timer_tick_occurred
87 /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
88 #[cfg(feature = "std")]
89 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
90 pub struct BackgroundProcessor {
91 stop_thread: Arc<AtomicBool>,
92 thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
96 const FRESHNESS_TIMER: u64 = 60;
98 const FRESHNESS_TIMER: u64 = 1;
100 #[cfg(all(not(test), not(debug_assertions)))]
101 const PING_TIMER: u64 = 10;
102 /// Signature operations take a lot longer without compiler optimisations.
103 /// Increasing the ping timer allows for this but slower devices will be disconnected if the
104 /// timeout is reached.
105 #[cfg(all(not(test), debug_assertions))]
106 const PING_TIMER: u64 = 30;
108 const PING_TIMER: u64 = 1;
111 const ONION_MESSAGE_HANDLER_TIMER: u64 = 10;
113 const ONION_MESSAGE_HANDLER_TIMER: u64 = 1;
115 /// Prune the network graph of stale entries hourly.
116 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
119 const SCORER_PERSIST_TIMER: u64 = 60 * 5;
121 const SCORER_PERSIST_TIMER: u64 = 1;
124 const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
126 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
129 const REBROADCAST_TIMER: u64 = 30;
131 const REBROADCAST_TIMER: u64 = 1;
133 #[cfg(feature = "futures")]
134 /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement
135 const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } }
136 #[cfg(feature = "futures")]
137 const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER),
138 min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER)));
140 /// Either [`P2PGossipSync`] or [`RapidGossipSync`].
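///
/// For instance, a node syncing gossip over the peer-to-peer network might construct its variant
/// via the provided constructor (a sketch; `p2p_sync` is assumed to be your `Arc`-wrapped
/// [`P2PGossipSync`]):
/// ```ignore
/// let gossip_sync = GossipSync::p2p(Arc::clone(&p2p_sync));
/// // or, when no gossip sync should run in the background processor:
/// // let gossip_sync = GossipSync::none();
/// ```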
142 P: Deref<Target = P2PGossipSync<G, U, L>>,
143 R: Deref<Target = RapidGossipSync<G, L>>,
144 G: Deref<Target = NetworkGraph<L>>,
148 where U::Target: UtxoLookup, L::Target: Logger {
149 /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
151 /// Rapid gossip sync from a trusted server.
158 P: Deref<Target = P2PGossipSync<G, U, L>>,
159 R: Deref<Target = RapidGossipSync<G, L>>,
160 G: Deref<Target = NetworkGraph<L>>,
163 > GossipSync<P, R, G, U, L>
164 where U::Target: UtxoLookup, L::Target: Logger {
165 fn network_graph(&self) -> Option<&G> {
167 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
168 GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
169 GossipSync::None => None,
173 fn prunable_network_graph(&self) -> Option<&G> {
175 GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
176 GossipSync::Rapid(gossip_sync) => {
177 if gossip_sync.is_initial_sync_complete() {
178 Some(gossip_sync.network_graph())
183 GossipSync::None => None,
188 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
189 impl<P: Deref<Target = P2PGossipSync<G, U, L>>, G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
190 GossipSync<P, &RapidGossipSync<G, L>, G, U, L>
192 U::Target: UtxoLookup,
195 /// Initializes a new [`GossipSync::P2P`] variant.
196 pub fn p2p(gossip_sync: P) -> Self {
197 GossipSync::P2P(gossip_sync)
201 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
202 impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
204 &P2PGossipSync<G, &'a (dyn UtxoLookup + Send + Sync), L>,
207 &'a (dyn UtxoLookup + Send + Sync),
213 /// Initializes a new [`GossipSync::Rapid`] variant.
214 pub fn rapid(gossip_sync: R) -> Self {
215 GossipSync::Rapid(gossip_sync)
219 /// This is not exported to bindings users as the bindings concretize everything and have constructors for us
222 &P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn UtxoLookup + Send + Sync), L>,
223 &RapidGossipSync<&'a NetworkGraph<L>, L>,
225 &'a (dyn UtxoLookup + Send + Sync),
231 /// Initializes a new [`GossipSync::None`] variant.
232 pub fn none() -> Self {
237 fn handle_network_graph_update<L: Deref>(
238 network_graph: &NetworkGraph<L>, event: &Event
239 ) where L::Target: Logger {
240 if let Event::PaymentPathFailed {
241 failure: PathFailure::OnPath { network_update: Some(ref upd) }, .. } = event
243 network_graph.handle_network_update(upd);
247 /// Updates scorer based on event and returns whether an update occurred so we can decide whether to persist the scorer.
249 fn update_scorer<'a, S: 'static + Deref<Target = SC> + Send + Sync, SC: 'a + WriteableScore<'a>>(
250 scorer: &'a S, event: &Event, duration_since_epoch: Duration,
253 Event::PaymentPathFailed { ref path, short_channel_id: Some(scid), .. } => {
254 let mut score = scorer.write_lock();
255 score.payment_path_failed(path, *scid, duration_since_epoch);
257 Event::PaymentPathFailed { ref path, payment_failed_permanently: true, .. } => {
258 // Reached if the destination explicitly failed it back. We treat this as a successful probe
259 // because the payment made it all the way to the destination with sufficient liquidity.
260 let mut score = scorer.write_lock();
261 score.probe_successful(path, duration_since_epoch);
263 Event::PaymentPathSuccessful { path, .. } => {
264 let mut score = scorer.write_lock();
265 score.payment_path_successful(path, duration_since_epoch);
267 Event::ProbeSuccessful { path, .. } => {
268 let mut score = scorer.write_lock();
269 score.probe_successful(path, duration_since_epoch);
271 Event::ProbeFailed { path, short_channel_id: Some(scid), .. } => {
272 let mut score = scorer.write_lock();
273 score.probe_failed(path, *scid, duration_since_epoch);
280 macro_rules! define_run_body {
282 $persister: ident, $chain_monitor: ident, $process_chain_monitor_events: expr,
283 $channel_manager: ident, $process_channel_manager_events: expr,
284 $peer_manager: ident, $process_onion_message_handler_events: expr, $gossip_sync: ident,
285 $logger: ident, $scorer: ident, $loop_exit_check: expr, $await: expr, $get_timer: expr,
286 $timer_elapsed: expr, $check_slow_await: expr, $time_fetch: expr,
288 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
289 $channel_manager.timer_tick_occurred();
290 log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
291 $chain_monitor.rebroadcast_pending_claims();
293 let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
294 let mut last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
295 let mut last_ping_call = $get_timer(PING_TIMER);
296 let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER);
297 let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
298 let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
299 let mut have_pruned = false;
300 let mut have_decayed_scorer = false;
303 $process_channel_manager_events;
304 $process_chain_monitor_events;
305 $process_onion_message_handler_events;
307 // Note that the PeerManager::process_events may block on ChannelManager's locks,
308 // hence it comes last here. When the ChannelManager finishes whatever it's doing,
309 // we want to ensure we get into `persist_manager` as quickly as we can, especially
310 // without running the normal event processing above and handing events to users.
312 // Specifically, on an *extremely* slow machine, we may see ChannelManager start
313 // processing a message effectively at any point during this loop. In order to
314 // minimize the time between such processing completing and persisting the updated
315 // ChannelManager, we want to minimize methods blocking on a ChannelManager
316 // generally, and as a fallback place such blocking only immediately before persisting the ChannelManager.
318 $peer_manager.as_ref().process_events();
320 // Exit the loop if the background processor was requested to stop.
321 if $loop_exit_check {
322 log_trace!($logger, "Terminating background processor.");
326 // We wait up to 100ms, but track how long it takes to detect being put to sleep,
327 // see `await_start`'s use below.
328 let mut await_start = None;
329 if $check_slow_await { await_start = Some($get_timer(1)); }
331 let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
333 // Exit the loop if the background processor was requested to stop.
334 if $loop_exit_check {
335 log_trace!($logger, "Terminating background processor.");
339 if $channel_manager.get_and_clear_needs_persistence() {
340 log_trace!($logger, "Persisting ChannelManager...");
341 $persister.persist_manager(&*$channel_manager)?;
342 log_trace!($logger, "Done persisting ChannelManager.");
344 if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
345 log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
346 $channel_manager.timer_tick_occurred();
347 last_freshness_call = $get_timer(FRESHNESS_TIMER);
349 if $timer_elapsed(&mut last_onion_message_handler_call, ONION_MESSAGE_HANDLER_TIMER) {
350 log_trace!($logger, "Calling OnionMessageHandler's timer_tick_occurred");
351 $peer_manager.onion_message_handler().timer_tick_occurred();
352 last_onion_message_handler_call = $get_timer(ONION_MESSAGE_HANDLER_TIMER);
355 // On various platforms, we may be starved of CPU cycles for several reasons.
356 // E.g. on iOS, if we've been in the background, we will be entirely paused.
357 // Similarly, if we're on a desktop platform and the device has been asleep, we
358 // may not get any cycles.
359 // We detect this by checking if our max-100ms-sleep, above, ran longer than a
360 // full second, at which point we assume sockets may have been killed (they
361 // appear to be at least on some platforms, even if it has only been a second).
362 // Note that we have to take care to not get here just because user event
363 // processing was slow at the top of the loop. For example, the sample client
364 // may call Bitcoin Core RPCs during event handling, which very often takes
365 // more than a handful of seconds to complete, and shouldn't disconnect all our peers.
367 log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
368 $peer_manager.as_ref().disconnect_all_peers();
369 last_ping_call = $get_timer(PING_TIMER);
370 } else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
371 log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
372 $peer_manager.as_ref().timer_tick_occurred();
373 last_ping_call = $get_timer(PING_TIMER);
376 // Note that we want to run a graph prune once not long after startup before
377 // falling back to our usual hourly prunes. This avoids short-lived clients never
378 // pruning their network graph. We run once 60 seconds after startup before
379 // continuing our normal cadence. For RGS, since 60 seconds is likely too long,
380 // we prune after an initial sync completes.
381 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
382 let prune_timer_elapsed = $timer_elapsed(&mut last_prune_call, prune_timer);
383 let should_prune = match $gossip_sync {
384 GossipSync::Rapid(_) => !have_pruned || prune_timer_elapsed,
385 _ => prune_timer_elapsed,
388 // The network graph must not be pruned while rapid sync completion is pending
389 if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
390 if let Some(duration_since_epoch) = $time_fetch() {
391 log_trace!($logger, "Pruning and persisting network graph.");
392 network_graph.remove_stale_channels_and_tracking_with_time(duration_since_epoch.as_secs());
394 log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
395 log_trace!($logger, "Persisting network graph.");
398 if let Err(e) = $persister.persist_graph(network_graph) {
399 log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
404 let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER };
405 last_prune_call = $get_timer(prune_timer);
408 if !have_decayed_scorer {
409 if let Some(ref scorer) = $scorer {
410 if let Some(duration_since_epoch) = $time_fetch() {
411 log_trace!($logger, "Calling time_passed on scorer at startup");
412 scorer.write_lock().time_passed(duration_since_epoch);
415 have_decayed_scorer = true;
418 if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
419 if let Some(ref scorer) = $scorer {
420 if let Some(duration_since_epoch) = $time_fetch() {
421 log_trace!($logger, "Calling time_passed and persisting scorer");
422 scorer.write_lock().time_passed(duration_since_epoch);
424 log_trace!($logger, "Persisting scorer");
426 if let Err(e) = $persister.persist_scorer(&scorer) {
427 log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
430 last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
433 if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
434 log_trace!($logger, "Rebroadcasting monitor's pending claims");
435 $chain_monitor.rebroadcast_pending_claims();
436 last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
440 // After we exit, ensure we persist the ChannelManager one final time - this avoids
441 // some races where users quit while channel updates were in-flight, with
442 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
443 $persister.persist_manager(&*$channel_manager)?;
445 // Persist Scorer on exit
446 if let Some(ref scorer) = $scorer {
447 $persister.persist_scorer(&scorer)?;
450 // Persist NetworkGraph on exit
451 if let Some(network_graph) = $gossip_sync.network_graph() {
452 $persister.persist_graph(network_graph)?;
459 #[cfg(feature = "futures")]
460 pub(crate) mod futures_util {
461 use core::future::Future;
462 use core::task::{Poll, Waker, RawWaker, RawWakerVTable};
464 use core::marker::Unpin;
465 pub(crate) struct Selector<
466 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
472 pub(crate) enum SelectorOutput {
477 A: Future<Output=()> + Unpin, B: Future<Output=()> + Unpin, C: Future<Output=bool> + Unpin
478 > Future for Selector<A, B, C> {
479 type Output = SelectorOutput;
480 fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll<SelectorOutput> {
481 match Pin::new(&mut self.a).poll(ctx) {
482 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::A); },
485 match Pin::new(&mut self.b).poll(ctx) {
486 Poll::Ready(()) => { return Poll::Ready(SelectorOutput::B); },
489 match Pin::new(&mut self.c).poll(ctx) {
490 Poll::Ready(res) => { return Poll::Ready(SelectorOutput::C(res)); },
497 // If we want to poll a future without awaiting it (i.e. outside of an async context) to figure out
498 // whether it has completed, we need a Waker, which needs a vtable. We fill it with dummy values,
499 // but sadly there's a good bit of boilerplate here.
500 fn dummy_waker_clone(_: *const ()) -> RawWaker { RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE) }
501 fn dummy_waker_action(_: *const ()) { }
503 const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
504 dummy_waker_clone, dummy_waker_action, dummy_waker_action, dummy_waker_action);
505 pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } }
507 #[cfg(feature = "futures")]
508 use futures_util::{Selector, SelectorOutput, dummy_waker};
509 #[cfg(feature = "futures")]
512 /// Processes background events in a future.
514 /// `sleeper` should return a future which completes in the given amount of time and returns a
515 /// boolean indicating whether the background processing should exit. Once `sleeper` returns a
516 /// future which outputs `true`, the loop will exit and this function's future will complete.
517 /// The `sleeper` future is free to return early after it has triggered the exit condition.
519 /// See [`BackgroundProcessor::start`] for information on which actions this handles.
521 /// Requires the `futures` feature. Note that while this method is available without the `std`
522 /// feature, using it without `std` will skip calling [`NetworkGraph::remove_stale_channels_and_tracking`];
523 /// instead, you should regularly call [`NetworkGraph::remove_stale_channels_and_tracking_with_time`]
524 /// manually.
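///
/// A sketch of such a manual call (assuming `now_secs` holds the current UNIX timestamp in
/// seconds, obtained from whatever clock your platform provides):
/// ```ignore
/// network_graph.remove_stale_channels_and_tracking_with_time(now_secs);
/// ```
///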
526 /// The `mobile_interruptable_platform` flag should be set if we're currently running on a
527 /// mobile device, where we may need to check for interruption of the application regularly. If you
528 /// are unsure, you should set the flag, as the performance impact of it is minimal unless there
529 /// are hundreds or thousands of simultaneous process calls running.
531 /// The `fetch_time` parameter should return the current wall clock time, if one is available. If
532 /// no time is available, some features may be disabled; however, the node will still operate fine.
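///
/// With `std` available, `fetch_time` might simply be the closure shown in the larger example
/// below:
/// ```ignore
/// || Some(std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH).unwrap())
/// ```
///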
534 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
535 /// could set up `process_events_async` like this:
537 /// # use lightning::io;
538 /// # use std::sync::{Arc, RwLock};
539 /// # use std::sync::atomic::{AtomicBool, Ordering};
540 /// # use std::time::SystemTime;
541 /// # use lightning_background_processor::{process_events_async, GossipSync};
542 /// # struct MyStore {}
543 /// # impl lightning::util::persist::KVStore for MyStore {
544 /// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
545 /// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
546 /// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
547 /// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
549 /// # struct MyEventHandler {}
550 /// # impl MyEventHandler {
551 /// # async fn handle_event(&self, _: lightning::events::Event) {}
553 /// # #[derive(Eq, PartialEq, Clone, Hash)]
554 /// # struct MySocketDescriptor {}
555 /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor {
556 /// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
557 /// # fn disconnect_socket(&mut self) {}
559 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
560 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
561 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
562 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
563 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
564 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
565 /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
566 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, Arc<MyUtxoLookup>, MyLogger>;
567 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
568 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
569 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
570 /// # type MyScorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
572 /// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
573 /// let background_persister = Arc::clone(&my_persister);
574 /// let background_event_handler = Arc::clone(&my_event_handler);
575 /// let background_chain_mon = Arc::clone(&my_chain_monitor);
576 /// let background_chan_man = Arc::clone(&my_channel_manager);
577 /// let background_gossip_sync = GossipSync::p2p(Arc::clone(&my_gossip_sync));
578 /// let background_peer_man = Arc::clone(&my_peer_manager);
579 /// let background_logger = Arc::clone(&my_logger);
580 /// let background_scorer = Arc::clone(&my_scorer);
582 /// // Setup the sleeper.
583 /// let (stop_sender, stop_receiver) = tokio::sync::watch::channel(());
585 /// let sleeper = move |d| {
586 /// let mut receiver = stop_receiver.clone();
587 /// Box::pin(async move {
589 /// _ = tokio::time::sleep(d) => false,
590 /// _ = receiver.changed() => true,
595 /// let mobile_interruptable_platform = false;
597 /// let handle = tokio::spawn(async move {
598 /// process_events_async(
599 /// background_persister,
600 /// |e| background_event_handler.handle_event(e),
601 /// background_chain_mon,
602 /// background_chan_man,
603 /// background_gossip_sync,
604 /// background_peer_man,
605 /// background_logger,
606 /// Some(background_scorer),
608 /// mobile_interruptable_platform,
609 /// || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap())
612 /// .expect("Failed to process events");
615 /// // Stop the background processing.
616 /// stop_sender.send(()).unwrap();
617 /// handle.await.unwrap();
620 #[cfg(feature = "futures")]
621 pub async fn process_events_async<
623 UL: 'static + Deref + Send + Sync,
624 CF: 'static + Deref + Send + Sync,
625 CW: 'static + Deref + Send + Sync,
626 T: 'static + Deref + Send + Sync,
627 ES: 'static + Deref + Send + Sync,
628 NS: 'static + Deref + Send + Sync,
629 SP: 'static + Deref + Send + Sync,
630 F: 'static + Deref + Send + Sync,
631 R: 'static + Deref + Send + Sync,
632 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
633 L: 'static + Deref + Send + Sync,
634 P: 'static + Deref + Send + Sync,
635 EventHandlerFuture: core::future::Future<Output = ()>,
636 EventHandler: Fn(Event) -> EventHandlerFuture,
637 PS: 'static + Deref + Send,
638 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
639 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
640 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
641 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
642 PM: 'static + Deref + Send + Sync,
643 S: 'static + Deref<Target = SC> + Send + Sync,
644 SC: for<'b> WriteableScore<'b>,
645 SleepFuture: core::future::Future<Output = bool> + core::marker::Unpin,
646 Sleeper: Fn(Duration) -> SleepFuture,
647 FetchTime: Fn() -> Option<Duration>,
649 persister: PS, event_handler: EventHandler, chain_monitor: M, channel_manager: CM,
650 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
651 sleeper: Sleeper, mobile_interruptable_platform: bool, fetch_time: FetchTime,
652 ) -> Result<(), lightning::io::Error>
654 UL::Target: 'static + UtxoLookup,
655 CF::Target: 'static + chain::Filter,
656 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
657 T::Target: 'static + BroadcasterInterface,
658 ES::Target: 'static + EntropySource,
659 NS::Target: 'static + NodeSigner,
660 SP::Target: 'static + SignerProvider,
661 F::Target: 'static + FeeEstimator,
662 R::Target: 'static + Router,
663 L::Target: 'static + Logger,
664 P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
665 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
666 PM::Target: APeerManager + Send + Sync,
668 let mut should_break = false;
669 let async_event_handler = |event| {
670 let network_graph = gossip_sync.network_graph();
671 let event_handler = &event_handler;
672 let scorer = &scorer;
673 let logger = &logger;
674 let persister = &persister;
675 let fetch_time = &fetch_time;
677 if let Some(network_graph) = network_graph {
678 handle_network_graph_update(network_graph, &event)
680 if let Some(ref scorer) = scorer {
681 if let Some(duration_since_epoch) = fetch_time() {
682 if update_scorer(scorer, &event, duration_since_epoch) {
683 log_trace!(logger, "Persisting scorer after update");
684 if let Err(e) = persister.persist_scorer(&scorer) {
685 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
690 event_handler(event).await;
694 persister, chain_monitor,
695 chain_monitor.process_pending_events_async(async_event_handler).await,
696 channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
698 for event in onion_message_handler_events(peer_manager) {
701 gossip_sync, logger, scorer, should_break, {
703 a: channel_manager.get_event_or_persistence_needed_future(),
704 b: chain_monitor.get_update_future(),
705 c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
708 SelectorOutput::A|SelectorOutput::B => {},
709 SelectorOutput::C(exit) => {
713 }, |t| sleeper(Duration::from_secs(t)),
714 |fut: &mut SleepFuture, _| {
715 let mut waker = dummy_waker();
716 let mut ctx = task::Context::from_waker(&mut waker);
717 match core::pin::Pin::new(fut).poll(&mut ctx) {
718 task::Poll::Ready(exit) => { should_break = exit; true },
719 task::Poll::Pending => false,
721 }, mobile_interruptable_platform, fetch_time,
725 fn onion_message_handler_events<PM: 'static + Deref + Send + Sync>(
727 ) -> impl Iterator<Item=Event> where PM::Target: APeerManager + Send + Sync {
728 peer_manager.onion_message_handler().get_and_clear_connections_needed()
729 .into_iter().map(|(node_id, addresses)| Event::ConnectionNeeded { node_id, addresses })
732 #[cfg(feature = "std")]
733 impl BackgroundProcessor {
734 /// Start a background thread that takes care of responsibilities enumerated in the [top-level documentation].
737 /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
738 /// [`Persister::persist_manager`] returns an error. In case of an error, the error is retrieved by calling
739 /// either [`join`] or [`stop`].
741 /// # Data Persistence
743 /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
744 /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
745 /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
746 /// provided implementation.
748 /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
749 /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
750 /// See the `lightning-persister` crate for LDK's provided implementation.
752 /// Typically, users should either implement [`Persister::persist_manager`] to never return an
753 /// error or call [`join`] and handle any error that may arise. For the latter case,
754 /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
758 /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
759 /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
760 /// functionality implemented by other handlers.
761 /// * [`P2PGossipSync`], if given, will update the [`NetworkGraph`] based on payment failures.
763 /// # Rapid Gossip Sync
765 /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
766 /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
767 /// until the [`RapidGossipSync`] instance completes its first sync.
769 /// [top-level documentation]: BackgroundProcessor
770 /// [`join`]: Self::join
771 /// [`stop`]: Self::stop
772 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
773 /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
774 /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
775 /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
776 /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
777 /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
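///
/// # Example
///
/// A minimal sketch (the `persister`, `event_handler`, `chain_monitor`, `channel_manager`,
/// `gossip_sync`, `peer_manager`, `logger` and `scorer` values are assumed to have been
/// constructed elsewhere, much like in the `process_events_async` example):
/// ```ignore
/// let bg_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager,
///     gossip_sync, peer_manager, logger, Some(scorer),
/// );
/// // ...
/// // On shutdown, stop the thread and surface any persistence error:
/// bg_processor.stop().unwrap();
/// ```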
780 UL: 'static + Deref + Send + Sync,
781 CF: 'static + Deref + Send + Sync,
782 CW: 'static + Deref + Send + Sync,
783 T: 'static + Deref + Send + Sync,
784 ES: 'static + Deref + Send + Sync,
785 NS: 'static + Deref + Send + Sync,
786 SP: 'static + Deref + Send + Sync,
787 F: 'static + Deref + Send + Sync,
788 R: 'static + Deref + Send + Sync,
789 G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
790 L: 'static + Deref + Send + Sync,
791 P: 'static + Deref + Send + Sync,
792 EH: 'static + EventHandler + Send,
793 PS: 'static + Deref + Send,
794 M: 'static + Deref<Target = ChainMonitor<<SP::Target as SignerProvider>::EcdsaSigner, CF, T, F, L, P>> + Send + Sync,
795 CM: 'static + Deref<Target = ChannelManager<CW, T, ES, NS, SP, F, R, L>> + Send + Sync,
796 PGS: 'static + Deref<Target = P2PGossipSync<G, UL, L>> + Send + Sync,
797 RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
798 PM: 'static + Deref + Send + Sync,
799 S: 'static + Deref<Target = SC> + Send + Sync,
800 SC: for <'b> WriteableScore<'b>,
802 persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
803 gossip_sync: GossipSync<PGS, RGS, G, UL, L>, peer_manager: PM, logger: L, scorer: Option<S>,
806 UL::Target: 'static + UtxoLookup,
807 CF::Target: 'static + chain::Filter,
808 CW::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
809 T::Target: 'static + BroadcasterInterface,
810 ES::Target: 'static + EntropySource,
811 NS::Target: 'static + NodeSigner,
812 SP::Target: 'static + SignerProvider,
813 F::Target: 'static + FeeEstimator,
814 R::Target: 'static + Router,
815 L::Target: 'static + Logger,
816 P::Target: 'static + Persist<<SP::Target as SignerProvider>::EcdsaSigner>,
817 PS::Target: 'static + Persister<'a, CW, T, ES, NS, SP, F, R, L, SC>,
818 PM::Target: APeerManager + Send + Sync,
820 let stop_thread = Arc::new(AtomicBool::new(false));
821 let stop_thread_clone = stop_thread.clone();
822 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
823 let event_handler = |event| {
824 let network_graph = gossip_sync.network_graph();
825 if let Some(network_graph) = network_graph {
826 handle_network_graph_update(network_graph, &event)
828 if let Some(ref scorer) = scorer {
829 use std::time::SystemTime;
830 let duration_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
831 .expect("Time should be sometime after 1970");
832 if update_scorer(scorer, &event, duration_since_epoch) {
833 log_trace!(logger, "Persisting scorer after update");
834 if let Err(e) = persister.persist_scorer(&scorer) {
835 log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
839 event_handler.handle_event(event);
842 persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
843 channel_manager, channel_manager.process_pending_events(&event_handler),
845 for event in onion_message_handler_events(&peer_manager) {
846 event_handler.handle_event(event);
848 gossip_sync, logger, scorer, stop_thread.load(Ordering::Acquire),
849 { Sleeper::from_two_futures(
850 channel_manager.get_event_or_persistence_needed_future(),
851 chain_monitor.get_update_future()
852 ).wait_timeout(Duration::from_millis(100)); },
853 |_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false,
855 use std::time::SystemTime;
856 Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
857 .expect("Time should be sometime after 1970"))
861 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
864 /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
865 /// [`ChannelManager`].
869 /// This function panics if the background thread has panicked, such as while persisting or handling events.
872 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
873 pub fn join(mut self) -> Result<(), std::io::Error> {
874 assert!(self.thread_handle.is_some());
878 /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
879 /// [`ChannelManager`].
883 /// This function panics if the background thread has panicked, such as while persisting or handling events.
886 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
887 pub fn stop(mut self) -> Result<(), std::io::Error> {
888 assert!(self.thread_handle.is_some());
889 self.stop_and_join_thread()
892 fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
893 self.stop_thread.store(true, Ordering::Release);
897 fn join_thread(&mut self) -> Result<(), std::io::Error> {
898 match self.thread_handle.take() {
899 Some(handle) => handle.join().unwrap(),
905 #[cfg(feature = "std")]
906 impl Drop for BackgroundProcessor {
908 self.stop_and_join_thread().unwrap();
912 #[cfg(all(feature = "std", test))]
914 use bitcoin::blockdata::constants::{genesis_block, ChainHash};
915 use bitcoin::blockdata::locktime::absolute::LockTime;
916 use bitcoin::blockdata::transaction::{Transaction, TxOut};
917 use bitcoin::network::constants::Network;
918 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1};
919 use lightning::chain::{BestBlock, Confirm, chainmonitor};
920 use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
921 use lightning::sign::{InMemorySigner, KeysManager};
922 use lightning::chain::transaction::OutPoint;
923 use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
924 use lightning::{get_event_msg, get_event};
925 use lightning::ln::PaymentHash;
926 use lightning::ln::channelmanager;
927 use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
928 use lightning::ln::features::{ChannelFeatures, NodeFeatures};
929 use lightning::ln::functional_test_utils::*;
930 use lightning::ln::msgs::{ChannelMessageHandler, Init};
931 use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
932 use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
933 use lightning::routing::scoring::{ChannelUsage, ScoreUpdate, ScoreLookUp, LockableScore};
934 use lightning::routing::router::{DefaultRouter, Path, RouteHop, CandidateRouteHop};
935 use lightning::util::config::UserConfig;
936 use lightning::util::ser::Writeable;
937 use lightning::util::test_utils;
938 use lightning::util::persist::{KVStore,
939 CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY,
940 NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY,
941 SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY};
942 use lightning_persister::fs_store::FilesystemStore;
943 use std::collections::VecDeque;
945 use std::path::PathBuf;
946 use std::sync::{Arc, Mutex};
947 use std::sync::mpsc::SyncSender;
948 use std::time::Duration;
949 use lightning_rapid_gossip_sync::RapidGossipSync;
950 use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
952 const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
954 #[derive(Clone, Hash, PartialEq, Eq)]
955 struct TestDescriptor{}
956 impl SocketDescriptor for TestDescriptor {
957 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
961 fn disconnect_socket(&mut self) {}
965 type LockingWrapper<T> = lightning::routing::scoring::MultiThreadedLockableScore<T>;
966 #[cfg(not(c_bindings))]
967 type LockingWrapper<T> = Mutex<T>;
969 type ChannelManager =
970 channelmanager::ChannelManager<
972 Arc<test_utils::TestBroadcaster>,
976 Arc<test_utils::TestFeeEstimator>,
978 Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
979 Arc<test_utils::TestLogger>,
981 Arc<LockingWrapper<TestScorer>>>
983 Arc<test_utils::TestLogger>>;
985 type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
987 type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
988 type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
991 node: Arc<ChannelManager>,
992 p2p_gossip_sync: PGS,
993 rapid_gossip_sync: RGS,
994 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
995 chain_monitor: Arc<ChainMonitor>,
996 kv_store: Arc<FilesystemStore>,
997 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
998 network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
999 logger: Arc<test_utils::TestLogger>,
1000 best_block: BestBlock,
1001 scorer: Arc<LockingWrapper<TestScorer>>,
1005 fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1006 GossipSync::P2P(self.p2p_gossip_sync.clone())
1009 fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1010 GossipSync::Rapid(self.rapid_gossip_sync.clone())
1013 fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
1018 impl Drop for Node {
1019 fn drop(&mut self) {
1020 let data_dir = self.kv_store.get_data_dir();
1021 match fs::remove_dir_all(data_dir.clone()) {
1022 Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
1029 graph_error: Option<(std::io::ErrorKind, &'static str)>,
1030 graph_persistence_notifier: Option<SyncSender<()>>,
1031 manager_error: Option<(std::io::ErrorKind, &'static str)>,
1032 scorer_error: Option<(std::io::ErrorKind, &'static str)>,
1033 kv_store: FilesystemStore,
1037 fn new(data_dir: PathBuf) -> Self {
1038 let kv_store = FilesystemStore::new(data_dir);
1039 Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
1042 fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1043 Self { graph_error: Some((error, message)), ..self }
1046 fn with_graph_persistence_notifier(self, sender: SyncSender<()>) -> Self {
1047 Self { graph_persistence_notifier: Some(sender), ..self }
1050 fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1051 Self { manager_error: Some((error, message)), ..self }
1054 fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
1055 Self { scorer_error: Some((error, message)), ..self }
1059 impl KVStore for Persister {
1060 fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
1061 self.kv_store.read(primary_namespace, secondary_namespace, key)
1064 fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
1065 if primary_namespace == CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE &&
1066 secondary_namespace == CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE &&
1067 key == CHANNEL_MANAGER_PERSISTENCE_KEY
1069 if let Some((error, message)) = self.manager_error {
1070 return Err(std::io::Error::new(error, message))
1074 if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE &&
1075 secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE &&
1076 key == NETWORK_GRAPH_PERSISTENCE_KEY
1078 if let Some(sender) = &self.graph_persistence_notifier {
1079 match sender.send(()) {
1081 Err(std::sync::mpsc::SendError(())) => println!("Persister failed to notify as receiver went away."),
1085 if let Some((error, message)) = self.graph_error {
1086 return Err(std::io::Error::new(error, message))
1090 if primary_namespace == SCORER_PERSISTENCE_PRIMARY_NAMESPACE &&
1091 secondary_namespace == SCORER_PERSISTENCE_SECONDARY_NAMESPACE &&
1092 key == SCORER_PERSISTENCE_KEY
1094 if let Some((error, message)) = self.scorer_error {
1095 return Err(std::io::Error::new(error, message))
1099 self.kv_store.write(primary_namespace, secondary_namespace, key, buf)
1102 fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
1103 self.kv_store.remove(primary_namespace, secondary_namespace, key, lazy)
1106 fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
1107 self.kv_store.list(primary_namespace, secondary_namespace)
1112 event_expectations: Option<VecDeque<TestResult>>,
1117 PaymentFailure { path: Path, short_channel_id: u64 },
1118 PaymentSuccess { path: Path },
1119 ProbeFailure { path: Path },
1120 ProbeSuccess { path: Path },
1125 Self { event_expectations: None }
1128 fn expect(&mut self, expectation: TestResult) {
1129 self.event_expectations.get_or_insert_with(VecDeque::new).push_back(expectation);
1133 impl lightning::util::ser::Writeable for TestScorer {
1134 fn write<W: lightning::util::ser::Writer>(&self, _: &mut W) -> Result<(), lightning::io::Error> { Ok(()) }
1137 impl ScoreLookUp for TestScorer {
1138 #[cfg(not(c_bindings))]
1139 type ScoreParams = ();
1140 fn channel_penalty_msat(
1141 &self, _candidate: &CandidateRouteHop, _usage: ChannelUsage, _score_params: &lightning::routing::scoring::ProbabilisticScoringFeeParameters
1142 ) -> u64 { unimplemented!(); }
1145 impl ScoreUpdate for TestScorer {
1146 fn payment_path_failed(&mut self, actual_path: &Path, actual_short_channel_id: u64, _: Duration) {
1147 if let Some(expectations) = &mut self.event_expectations {
1148 match expectations.pop_front().unwrap() {
1149 TestResult::PaymentFailure { path, short_channel_id } => {
1150 assert_eq!(actual_path, &path);
1151 assert_eq!(actual_short_channel_id, short_channel_id);
1153 TestResult::PaymentSuccess { path } => {
1154 panic!("Unexpected successful payment path: {:?}", path)
1156 TestResult::ProbeFailure { path } => {
1157 panic!("Unexpected probe failure: {:?}", path)
1159 TestResult::ProbeSuccess { path } => {
1160 panic!("Unexpected probe success: {:?}", path)
1166 fn payment_path_successful(&mut self, actual_path: &Path, _: Duration) {
1167 if let Some(expectations) = &mut self.event_expectations {
1168 match expectations.pop_front().unwrap() {
1169 TestResult::PaymentFailure { path, .. } => {
1170 panic!("Unexpected payment path failure: {:?}", path)
1172 TestResult::PaymentSuccess { path } => {
1173 assert_eq!(actual_path, &path);
1175 TestResult::ProbeFailure { path } => {
1176 panic!("Unexpected probe failure: {:?}", path)
1178 TestResult::ProbeSuccess { path } => {
1179 panic!("Unexpected probe success: {:?}", path)
1185 fn probe_failed(&mut self, actual_path: &Path, _: u64, _: Duration) {
1186 if let Some(expectations) = &mut self.event_expectations {
1187 match expectations.pop_front().unwrap() {
1188 TestResult::PaymentFailure { path, .. } => {
1189 panic!("Unexpected payment path failure: {:?}", path)
1191 TestResult::PaymentSuccess { path } => {
1192 panic!("Unexpected payment path success: {:?}", path)
1194 TestResult::ProbeFailure { path } => {
1195 assert_eq!(actual_path, &path);
1197 TestResult::ProbeSuccess { path } => {
1198 panic!("Unexpected probe success: {:?}", path)
1203 fn probe_successful(&mut self, actual_path: &Path, _: Duration) {
1204 if let Some(expectations) = &mut self.event_expectations {
1205 match expectations.pop_front().unwrap() {
1206 TestResult::PaymentFailure { path, .. } => {
1207 panic!("Unexpected payment path failure: {:?}", path)
1209 TestResult::PaymentSuccess { path } => {
1210 panic!("Unexpected payment path success: {:?}", path)
1212 TestResult::ProbeFailure { path } => {
1213 panic!("Unexpected probe failure: {:?}", path)
1215 TestResult::ProbeSuccess { path } => {
1216 assert_eq!(actual_path, &path);
1221 fn time_passed(&mut self, _: Duration) {}
1225 impl lightning::routing::scoring::Score for TestScorer {}
1227 impl Drop for TestScorer {
1228 fn drop(&mut self) {
1229 if std::thread::panicking() {
1233 if let Some(event_expectations) = &self.event_expectations {
1234 if !event_expectations.is_empty() {
1235 panic!("Unsatisfied event expectations: {:?}", event_expectations);
1241 fn get_full_filepath(filepath: String, filename: String) -> String {
1242 let mut path = PathBuf::from(filepath);
1243 path.push(filename);
1244 path.to_str().unwrap().to_string()
1247 fn create_nodes(num_nodes: usize, persist_dir: &str) -> (String, Vec<Node>) {
1248 let persist_temp_path = env::temp_dir().join(persist_dir);
1249 let persist_dir = persist_temp_path.to_string_lossy().to_string();
1250 let network = Network::Bitcoin;
1251 let mut nodes = Vec::new();
1252 for i in 0..num_nodes {
1253 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster::new(network));
1254 let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
1255 let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
1256 let genesis_block = genesis_block(network);
1257 let network_graph = Arc::new(NetworkGraph::new(network, logger.clone()));
1258 let scorer = Arc::new(LockingWrapper::new(TestScorer::new()));
1259 let now = Duration::from_secs(genesis_block.header.time as u64);
1260 let seed = [i as u8; 32];
1261 let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
1262 let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), Arc::clone(&keys_manager), scorer.clone(), Default::default()));
1263 let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
1264 let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
1265 let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
1266 let best_block = BestBlock::from_network(network);
1267 let params = ChainParameters { network, best_block };
1268 let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
1269 let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
1270 let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone(), logger.clone()));
1271 let msg_handler = MessageHandler {
1272 chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new(ChainHash::using_genesis_block(Network::Testnet))),
1273 route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new()),
1274 onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
1276 let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
1277 let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
1281 for i in 0..num_nodes {
1282 for j in (i+1)..num_nodes {
1283 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init {
1284 features: nodes[j].node.init_features(), networks: None, remote_network_address: None
1286 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init {
1287 features: nodes[i].node.init_features(), networks: None, remote_network_address: None
1292 (persist_dir, nodes)
1295 macro_rules! open_channel {
1296 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1297 begin_open_channel!($node_a, $node_b, $channel_value);
1298 let events = $node_a.node.get_and_clear_pending_events();
1299 assert_eq!(events.len(), 1);
1300 let (temporary_channel_id, tx) = handle_funding_generation_ready!(events[0], $channel_value);
1301 $node_a.node.funding_transaction_generated(&temporary_channel_id, &$node_b.node.get_our_node_id(), tx.clone()).unwrap();
1302 $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
1303 get_event!($node_b, Event::ChannelPending);
1304 $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
1305 get_event!($node_a, Event::ChannelPending);
1310 macro_rules! begin_open_channel {
1311 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
1312 $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None, None).unwrap();
1313 $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
1314 $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
1318 macro_rules! handle_funding_generation_ready {
1319 ($event: expr, $channel_value: expr) => {{
1321 Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id, .. } => {
1322 assert_eq!(channel_value_satoshis, $channel_value);
1323 assert_eq!(user_channel_id, 42);
1325 let tx = Transaction { version: 1 as i32, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
1326 value: channel_value_satoshis, script_pubkey: output_script.clone(),
1328 (temporary_channel_id, tx)
1330 _ => panic!("Unexpected event"),
1335 fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
1336 for i in 1..=depth {
1337 let prev_blockhash = node.best_block.block_hash();
1338 let height = node.best_block.height() + 1;
1339 let header = create_dummy_header(prev_blockhash, height);
1340 let txdata = vec![(0, tx)];
1341 node.best_block = BestBlock::new(header.block_hash(), height);
1344 node.node.transactions_confirmed(&header, &txdata, height);
1345 node.chain_monitor.transactions_confirmed(&header, &txdata, height);
1347 x if x == depth => {
1348 node.node.best_block_updated(&header, height);
1349 node.chain_monitor.best_block_updated(&header, height);
1355 fn confirm_transaction(node: &mut Node, tx: &Transaction) {
1356 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
1360 fn test_background_processor() {
1361 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
1362 // updates. Also test that when new updates are available, the manager signals that it needs
1363 // re-persistence and is successfully re-persisted.
1364 let (persist_dir, nodes) = create_nodes(2, "test_background_processor");
1366 // Go through the channel creation process so that each node has something to persist. Since
1367 // open_channel consumes events, it must complete before starting BackgroundProcessor to
1368 // avoid a race with processing events.
1369 let tx = open_channel!(nodes[0], nodes[1], 100000);
1371 // Initiate the background processors to watch each node.
1372 let data_dir = nodes[0].kv_store.get_data_dir();
1373 let persister = Arc::new(Persister::new(data_dir));
1374 let event_handler = |_: _| {};
1375 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
1377 macro_rules! check_persisted_data {
1378 ($node: expr, $filepath: expr) => {
1379 let mut expected_bytes = Vec::new();
1381 expected_bytes.clear();
1382 match $node.write(&mut expected_bytes) {
1384 match std::fs::read($filepath) {
1386 if bytes == expected_bytes {
1395 Err(e) => panic!("Unexpected error: {}", e)
	// Check that the initial channel manager data is persisted as expected.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "manager".to_string());
	check_persisted_data!(nodes[0].node, filepath.clone());

	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Force-close the channel.
	nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

	// Check that the force-close updates are persisted.
	check_persisted_data!(nodes[0].node, filepath.clone());
	loop {
		if !nodes[0].node.get_event_or_persist_condvar_value() { break }
	}

	// Check that the network graph is persisted.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "network_graph".to_string());
	check_persisted_data!(nodes[0].network_graph, filepath.clone());

	// Check that the scorer is persisted.
	let filepath = get_full_filepath(format!("{}_persister_0", &persist_dir), "scorer".to_string());
	check_persisted_data!(nodes[0].scorer, filepath.clone());

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_timer_tick_called() {
	// Test that:
	// - `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`,
	// - `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`,
	// - `PeerManager::timer_tick_occurred` is called every `PING_TIMER`, and
	// - `OnionMessageHandler::timer_tick_occurred` is called every `ONION_MESSAGE_HANDLER_TIMER`.
	let (_, nodes) = create_nodes(1, "test_timer_tick_called");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
		let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
		let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
		let desired_log_4 = "Calling OnionMessageHandler's timer_tick_occurred".to_string();
		if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() &&
			log_entries.get(&("lightning_background_processor", desired_log_4)).is_some() {
			break
		}
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_channel_manager_persist_error() {
	// Test that if we encounter an error during manager persistence, the error is returned when
	// joining the background processor.
	let (_, nodes) = create_nodes(2, "test_persist_error");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	match bg_processor.join() {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_channel_manager_persist_error_async() {
	// Test that if we encounter an error during manager persistence, the processing future
	// resolves to an error.
	let (_, nodes) = create_nodes(2, "test_persist_error_sync");
	open_channel!(nodes[0], nodes[1], 100000);

	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));

	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			Box::pin(async move {
				tokio::time::sleep(dur).await;
				false // Never exit
			})
		}, false, || Some(Duration::ZERO),
	);
	match bp_future.await {
		Ok(_) => panic!("Expected error persisting manager"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_network_graph_persist_error() {
	// Test that if we encounter an error during network graph persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting network graph"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_scorer_persist_error() {
	// Test that if we encounter an error during scorer persistence, an error gets returned.
	let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	match bg_processor.stop() {
		Ok(_) => panic!("Expected error persisting scorer"),
		Err(e) => {
			assert_eq!(e.kind(), std::io::ErrorKind::Other);
			assert_eq!(e.get_ref().unwrap().to_string(), "test");
		},
	}
}
#[test]
fn test_background_event_handling() {
	let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
	let channel_value = 100000;
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir.clone()));

	// Set up a background event handler for FundingGenerationReady events.
	let (funding_generation_send, funding_generation_recv) = std::sync::mpsc::sync_channel(1);
	let (channel_pending_send, channel_pending_recv) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::FundingGenerationReady { .. } => funding_generation_send.send(handle_funding_generation_ready!(event, channel_value)).unwrap(),
		Event::ChannelPending { .. } => channel_pending_send.send(()).unwrap(),
		Event::ChannelReady { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};

	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
	// Open a channel and check that the FundingGenerationReady event was handled.
	begin_open_channel!(nodes[0], nodes[1], channel_value);
	let (temporary_channel_id, funding_tx) = funding_generation_recv
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("FundingGenerationReady not handled within deadline");
	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
	get_event!(nodes[1], Event::ChannelPending);
	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
	let _ = channel_pending_recv.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("ChannelPending not handled within deadline");

	// Confirm the funding transaction.
	confirm_transaction(&mut nodes[0], &funding_tx);
	let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
	confirm_transaction(&mut nodes[1], &funding_tx);
	let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_funding);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_funding);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
	// Set up a background event handler for SpendableOutputs events.
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::SpendableOutputs { .. } => sender.send(event).unwrap(),
		Event::ChannelReady { .. } => {},
		Event::ChannelClosed { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	};
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	// Force close the channel and check that the SpendableOutputs event was handled.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
	confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);

	let event = receiver
		.recv_timeout(Duration::from_secs(EVENT_DEADLINE))
		.expect("Events not handled within deadline");
	match event {
		Event::SpendableOutputs { .. } => {},
		_ => panic!("Unexpected event: {:?}", event),
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
#[test]
fn test_scorer_persistence() {
	let (_, nodes) = create_nodes(2, "test_scorer_persistence");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let event_handler = |_: _| {};
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	loop {
		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Calling time_passed and persisting scorer".to_string();
		if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
			break
		}
	}

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}
}
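// Shared body for the sync and async pruning tests below: `$receive` waits for the graph
// persistence notification and `$sleep` yields between polls of the processor's log output.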
macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
	($nodes: expr, $receive: expr, $sleep: expr) => {
		let features = ChannelFeatures::empty();
		$nodes[0].network_graph.add_channel_from_partial_announcement(
			42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
		).expect("Failed to update channel from partial announcement");
		let original_graph_description = $nodes[0].network_graph.to_string();
		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);

		loop {
			$sleep;
			let log_entries = $nodes[0].logger.lines.lock().unwrap();
			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
			if *log_entries.get(&("lightning_background_processor", loop_counter))
				.unwrap_or(&0) > 1
			{
				// Wait until the loop has gone around at least twice.
				break
			}
		}
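		// A raw rapid gossip sync snapshot; the leading `76, 68, 75, 1` bytes are the "LDK"
		// prefix and version expected by the rapid-gossip-sync decoder.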
		let initialization_input = vec![
			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
		];
		$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();

		// This should have added two channels and pruned the previous one.
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 2);

		$receive.expect("Network graph not pruned within deadline");

		// All channels should now be pruned.
		assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
	}
}
#[test]
fn test_not_pruning_network_graph_until_graph_sync_completion() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let event_handler = |_: _| {};
	let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
		receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
		std::thread::sleep(Duration::from_millis(1)));

	background_processor.stop().unwrap();
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);

	let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
	let bp_future = super::process_events_async(
		persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::from_secs(1696300000)),
	);

	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
			let mut i = 0;
			loop {
				tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
				if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
				assert!(i < 5);
				i += 1;
			}
		}, tokio::time::sleep(Duration::from_millis(1)).await);
		exit_sender.send(()).unwrap();
	});
	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();
	r2.unwrap()
}
macro_rules! do_test_payment_path_scoring {
	($nodes: expr, $receive: expr) => {
		// Ensure that we update the scorer when relevant events are processed. In this case, we ensure
		// that we update the scorer upon a payment path succeeding (note that the channel must be
		// public or else we won't score it).
		// A background event handler for FundingGenerationReady events must be hooked up to a
		// running background processor.
		let scored_scid = 4242;
		let secp_ctx = Secp256k1::new();
		let node_1_privkey = SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_id = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey);

		let path = Path { hops: vec![RouteHop {
			pubkey: node_1_id,
			node_features: NodeFeatures::empty(),
			short_channel_id: scored_scid,
			channel_features: ChannelFeatures::empty(),
			fee_msat: 0,
			cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA as u32,
			maybe_announced_channel: true,
		}], blinded_tail: None };
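		// Each block below registers the `TestResult` the scorer should see, queues the
		// corresponding event on the node, and then waits for the background processor to hand
		// the event to the test's event handler via `$receive`.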
		$nodes[0].scorer.write_lock().expect(TestResult::PaymentFailure { path: path.clone(), short_channel_id: scored_scid });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: false,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		// Ensure we'll score payments that were explicitly failed back by the destination as
		// ProbeSuccess.
		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathFailed {
			payment_id: None,
			payment_hash: PaymentHash([42; 32]),
			payment_failed_permanently: true,
			failure: PathFailure::OnPath { network_update: None },
			path: path.clone(),
			short_channel_id: None,
		});
		let event = $receive.expect("PaymentPathFailed not handled within deadline");
		match event {
			Event::PaymentPathFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::PaymentSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::PaymentPathSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: None,
			path: path.clone(),
		});
		let event = $receive.expect("PaymentPathSuccessful not handled within deadline");
		match event {
			Event::PaymentPathSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::ProbeSuccess { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeSuccessful {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
		});
		let event = $receive.expect("ProbeSuccessful not handled within deadline");
		match event {
			Event::ProbeSuccessful { .. } => {},
			_ => panic!("Unexpected event"),
		}

		$nodes[0].scorer.write_lock().expect(TestResult::ProbeFailure { path: path.clone() });
		$nodes[0].node.push_pending_event(Event::ProbeFailed {
			payment_id: PaymentId([42; 32]),
			payment_hash: PaymentHash([42; 32]),
			path: path.clone(),
			short_channel_id: Some(scored_scid),
		});
		let event = $receive.expect("ProbeFailure not handled within deadline");
		match event {
			Event::ProbeFailed { .. } => {},
			_ => panic!("Unexpected event"),
		}
	}
}
#[test]
fn test_payment_path_scoring() {
	let (sender, receiver) = std::sync::mpsc::sync_channel(1);
	let event_handler = move |event: Event| match event {
		Event::PaymentPathFailed { .. } => sender.send(event).unwrap(),
		Event::PaymentPathSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeSuccessful { .. } => sender.send(event).unwrap(),
		Event::ProbeFailed { .. } => sender.send(event).unwrap(),
		_ => panic!("Unexpected event: {:?}", event),
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));
	let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

	do_test_payment_path_scoring!(nodes, receiver.recv_timeout(Duration::from_secs(EVENT_DEADLINE)));

	if !std::thread::panicking() {
		bg_processor.stop().unwrap();
	}

	let log_entries = nodes[0].logger.lines.lock().unwrap();
	let expected_log = "Persisting scorer after update".to_string();
	assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
}
#[tokio::test]
#[cfg(feature = "futures")]
async fn test_payment_path_scoring_async() {
	let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
	let event_handler = move |event: Event| {
		let sender_ref = sender.clone();
		async move {
			match event {
				Event::PaymentPathFailed { .. } => { sender_ref.send(event).await.unwrap() },
				Event::PaymentPathSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeSuccessful { .. } => { sender_ref.send(event).await.unwrap() },
				Event::ProbeFailed { .. } => { sender_ref.send(event).await.unwrap() },
				_ => panic!("Unexpected event: {:?}", event),
			}
		}
	};

	let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
	let data_dir = nodes[0].kv_store.get_data_dir();
	let persister = Arc::new(Persister::new(data_dir));

	let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
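	// The custom sleeper returns `true` once the exit signal fires, which tells
	// `process_events_async` to shut down after the current loop iteration.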
	let bp_future = super::process_events_async(
		persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
		nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
		Some(nodes[0].scorer.clone()), move |dur: Duration| {
			let mut exit_receiver = exit_receiver.clone();
			Box::pin(async move {
				tokio::select! {
					_ = tokio::time::sleep(dur) => false,
					_ = exit_receiver.changed() => true,
				}
			})
		}, false, || Some(Duration::ZERO),
	);
	let t1 = tokio::spawn(bp_future);
	let t2 = tokio::spawn(async move {
		do_test_payment_path_scoring!(nodes, receiver.recv().await);
		exit_sender.send(()).unwrap();

		let log_entries = nodes[0].logger.lines.lock().unwrap();
		let expected_log = "Persisting scorer after update".to_string();
		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
	});

	let (r1, r2) = tokio::join!(t1, t2);
	r1.unwrap().unwrap();