X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-background-processor%2Fsrc%2Flib.rs;h=d38c30e584fa1237b0b3870dda8c7825d0d67e72;hb=refs%2Fheads%2F2022-08-async-man-update;hp=9783f9e7ebc2a5d31b3f4d76220eea104a9a7125;hpb=ac35492877637d6dbbe6ee93dc5ed0f678bfbb5f;p=rust-lightning

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 9783f9e7..d38c30e5 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -18,7 +18,7 @@ use lightning::chain::keysinterface::{Sign, KeysInterface};
 use lightning::ln::channelmanager::ChannelManager;
 use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
-use lightning::routing::network_graph::{NetworkGraph, P2PGossipSync};
+use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 use lightning::routing::scoring::WriteableScore;
 use lightning::util::events::{Event, EventHandler, EventsProvider};
 use lightning::util::logger::Logger;
@@ -31,6 +31,9 @@ use std::thread::JoinHandle;
 use std::time::{Duration, Instant};
 use std::ops::Deref;
 
+#[cfg(feature = "futures")]
+use futures::{select, future::FutureExt};
+
 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
 /// responsibilities are:
@@ -40,8 +43,8 @@ use std::ops::Deref;
 ///   [`ChannelManager`] persistence should be done in the background.
 /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
 ///   at the appropriate intervals.
-/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`P2PGossipSync`] is provided to
-///   [`BackgroundProcessor::start`]).
+/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`GossipSync`] with a [`NetworkGraph`]
+///   is provided to [`BackgroundProcessor::start`]).
 ///
 /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied
 /// upon as doing so may result in high latency.
@@ -79,7 +82,7 @@ const PING_TIMER: u64 = 1;
 /// Prune the network graph of stale entries hourly.
 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
 
-#[cfg(all(not(test), debug_assertions))]
+#[cfg(not(test))]
 const SCORER_PERSIST_TIMER: u64 = 30;
 #[cfg(test)]
 const SCORER_PERSIST_TIMER: u64 = 1;
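The `#[cfg]` change above widens the 30-second scorer-persistence interval from debug-only builds to every non-test build. For readers unfamiliar with cfg-gated constants, the selection mechanism looks like this as a standalone sketch (the constants are copied from this patch; the `main` is illustrative only):

```rust
// In normal builds the 30s value is compiled in; under `cargo test` the 1s
// value is, keeping the scorer-persistence tests fast.
#[cfg(not(test))]
const SCORER_PERSIST_TIMER: u64 = 30;
#[cfg(test)]
const SCORER_PERSIST_TIMER: u64 = 1;

fn main() {
	println!("scorer persisted every {} second(s)", SCORER_PERSIST_TIMER);
}
```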
@@ -89,36 +92,333 @@ const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
 #[cfg(test)]
 const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;
 
+/// Either [`P2PGossipSync`] or [`RapidGossipSync`].
+pub enum GossipSync<
+	P: Deref<Target = P2PGossipSync<G, A, L>>,
+	R: Deref<Target = RapidGossipSync<G, L>>,
+	G: Deref<Target = NetworkGraph<L>>,
+	A: Deref,
+	L: Deref,
+>
+where A::Target: chain::Access, L::Target: Logger {
+	/// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7.
+	P2P(P),
+	/// Rapid gossip sync from a trusted server.
+	Rapid(R),
+	/// No gossip sync.
+	None,
+}
+
+impl<
+	P: Deref<Target = P2PGossipSync<G, A, L>>,
+	R: Deref<Target = RapidGossipSync<G, L>>,
+	G: Deref<Target = NetworkGraph<L>>,
+	A: Deref,
+	L: Deref,
+> GossipSync<P, R, G, A, L>
+where A::Target: chain::Access, L::Target: Logger {
+	fn network_graph(&self) -> Option<&G> {
+		match self {
+			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
+			GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()),
+			GossipSync::None => None,
+		}
+	}
+
+	fn prunable_network_graph(&self) -> Option<&G> {
+		match self {
+			GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()),
+			GossipSync::Rapid(gossip_sync) => {
+				if gossip_sync.is_initial_sync_complete() {
+					Some(gossip_sync.network_graph())
+				} else {
+					None
+				}
+			},
+			GossipSync::None => None,
+		}
+	}
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<P: Deref<Target = P2PGossipSync<G, A, L>>, G: Deref<Target = NetworkGraph<L>>, A: Deref, L: Deref>
+	GossipSync<P, &RapidGossipSync<G, L>, G, A, L>
+where
+	A::Target: chain::Access,
+	L::Target: Logger,
+{
+	/// Initializes a new [`GossipSync::P2P`] variant.
+	pub fn p2p(gossip_sync: P) -> Self {
+		GossipSync::P2P(gossip_sync)
+	}
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, R: Deref<Target = RapidGossipSync<G, L>>, G: Deref<Target = NetworkGraph<L>>, L: Deref>
+	GossipSync<
+		&P2PGossipSync<G, &'a (dyn chain::Access + Send + Sync), L>,
+		R,
+		G,
+		&'a (dyn chain::Access + Send + Sync),
+		L,
+	>
+where
+	L::Target: Logger,
+{
+	/// Initializes a new [`GossipSync::Rapid`] variant.
+	pub fn rapid(gossip_sync: R) -> Self {
+		GossipSync::Rapid(gossip_sync)
+	}
+}
+
+/// (C-not exported) as the bindings concretize everything and have constructors for us
+impl<'a, L: Deref>
+	GossipSync<
+		&P2PGossipSync<&'a NetworkGraph<L>, &'a (dyn chain::Access + Send + Sync), L>,
+		&RapidGossipSync<&'a NetworkGraph<L>, L>,
+		&'a NetworkGraph<L>,
+		&'a (dyn chain::Access + Send + Sync),
+		L,
+	>
+where
+	L::Target: Logger,
+{
+	/// Initializes a new [`GossipSync::None`] variant.
+	pub fn none() -> Self {
+		GossipSync::None
+	}
+}
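A note on the constructor impls above: writing `GossipSync::None` directly would force the caller to name all five type parameters, since no variant field pins them down. The `p2p`, `rapid` and `none` helpers fix the unused parameters to concrete placeholder types so inference succeeds. A minimal, self-contained analogue of that trick (illustrative only, not LDK code):

```rust
use std::ops::Deref;

struct Peer;
struct Rapid;

#[allow(dead_code)]
enum MaybeSync<P: Deref<Target = Peer>, R: Deref<Target = Rapid>> {
	P2P(P),
	Rapid(R),
	None,
}

impl<'a> MaybeSync<&'a Peer, &'a Rapid> {
	// Without this helper, callers would need MaybeSync::<&Peer, &Rapid>::None.
	fn none() -> Self { MaybeSync::None }
}

fn main() {
	let no_sync = MaybeSync::none(); // P and R are inferred from the impl
	if let MaybeSync::None = no_sync { println!("no gossip sync"); }
}
```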

 /// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
 struct DecoratingEventHandler<
+	'a,
 	E: EventHandler,
-	P: Deref<Target = P2PGossipSync<G, A, L>>,
-	G: Deref<Target = NetworkGraph>,
+	PGS: Deref<Target = P2PGossipSync<G, A, L>>,
+	RGS: Deref<Target = RapidGossipSync<G, L>>,
+	G: Deref<Target = NetworkGraph<L>>,
 	A: Deref,
 	L: Deref,
 > where A::Target: chain::Access, L::Target: Logger {
 	event_handler: E,
-	p2p_gossip_sync: Option<P>,
+	gossip_sync: &'a GossipSync<PGS, RGS, G, A, L>,
 }
 
 impl<
+	'a,
 	E: EventHandler,
-	P: Deref<Target = P2PGossipSync<G, A, L>>,
-	G: Deref<Target = NetworkGraph>,
+	PGS: Deref<Target = P2PGossipSync<G, A, L>>,
+	RGS: Deref<Target = RapidGossipSync<G, L>>,
+	G: Deref<Target = NetworkGraph<L>>,
 	A: Deref,
 	L: Deref,
-> EventHandler for DecoratingEventHandler<E, P, G, A, L>
+> EventHandler for DecoratingEventHandler<'a, E, PGS, RGS, G, A, L>
 where A::Target: chain::Access, L::Target: Logger {
 	fn handle_event(&self, event: &Event) {
-		if let Some(event_handler) = &self.p2p_gossip_sync {
-			event_handler.handle_event(event);
+		if let Some(network_graph) = self.gossip_sync.network_graph() {
+			network_graph.handle_event(event);
 		}
 		self.event_handler.handle_event(event);
 	}
 }
 
+macro_rules! define_run_body {
+	($persister: ident, $event_handler: ident, $chain_monitor: ident, $channel_manager: ident,
+	 $gossip_sync: ident, $peer_manager: ident, $logger: ident, $scorer: ident,
+	 $loop_exit_check: expr, $await: expr)
+	=> { {
+		let event_handler = DecoratingEventHandler {
+			event_handler: $event_handler,
+			gossip_sync: &$gossip_sync,
+		};
+
+		log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
+		$channel_manager.timer_tick_occurred();
+
+		let mut last_freshness_call = Instant::now();
+		let mut last_ping_call = Instant::now();
+		let mut last_prune_call = Instant::now();
+		let mut last_scorer_persist_call = Instant::now();
+		let mut have_pruned = false;
+
+		loop {
+			$channel_manager.process_pending_events(&event_handler);
+			$chain_monitor.process_pending_events(&event_handler);
+
+			// Note that the PeerManager::process_events may block on ChannelManager's locks,
+			// hence it comes last here. When the ChannelManager finishes whatever it's doing,
+			// we want to ensure we get into `persist_manager` as quickly as we can, especially
+			// without running the normal event processing above and handing events to users.
+			//
+			// Specifically, on an *extremely* slow machine, we may see ChannelManager start
+			// processing a message effectively at any point during this loop. In order to
+			// minimize the time between such processing completing and persisting the updated
+			// ChannelManager, we want to minimize methods blocking on a ChannelManager
+			// generally, and as a fallback place such blocking only immediately before
+			// persistence.
+			$peer_manager.process_events();
+
+			// We wait up to 100ms, but track how long it takes to detect being put to sleep,
+			// see `await_start`'s use below.
+			let await_start = Instant::now();
+			let updates_available = $await;
+			let await_time = await_start.elapsed();
+
+			if updates_available {
+				log_trace!($logger, "Persisting ChannelManager...");
+				$persister.persist_manager(&*$channel_manager)?;
+				log_trace!($logger, "Done persisting ChannelManager.");
+			}
+			// Exit the loop if the background processor was requested to stop.
+			if $loop_exit_check {
+				log_trace!($logger, "Terminating background processor.");
+				break;
+			}
+			if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
+				log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
+				$channel_manager.timer_tick_occurred();
+				last_freshness_call = Instant::now();
+			}
+			if await_time > Duration::from_secs(1) {
+				// On various platforms, we may be starved of CPU cycles for several reasons.
+				// E.g. on iOS, if we've been in the background, we will be entirely paused.
+				// Similarly, if we're on a desktop platform and the device has been asleep, we
+				// may not get any cycles.
+				// We detect this by checking if our max-100ms-sleep, above, ran longer than a
+				// full second, at which point we assume sockets may have been killed (they
+				// appear to be at least on some platforms, even if it has only been a second).
+				// Note that we have to take care to not get here just because user event
+				// processing was slow at the top of the loop. For example, the sample client
+				// may call Bitcoin Core RPCs during event handling, which very often takes
+				// more than a handful of seconds to complete, and shouldn't disconnect all our
+				// peers.
+				log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
+				$peer_manager.disconnect_all_peers();
+				last_ping_call = Instant::now();
+			} else if last_ping_call.elapsed().as_secs() > PING_TIMER {
+				log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
+				$peer_manager.timer_tick_occurred();
+				last_ping_call = Instant::now();
+			}
+
+			// Note that we want to run a graph prune once not long after startup before
+			// falling back to our usual hourly prunes. This avoids short-lived clients never
+			// pruning their network graph. We run once 60 seconds after startup before
+			// continuing our normal cadence.
+			if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
+				// The network graph must not be pruned while rapid sync completion is pending
+				log_trace!($logger, "Assessing prunability of network graph");
+				if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
+					network_graph.remove_stale_channels();
+
+					if let Err(e) = $persister.persist_graph(network_graph) {
+						log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
+					}
+
+					last_prune_call = Instant::now();
+					have_pruned = true;
+				} else {
+					log_trace!($logger, "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.");
+				}
+			}
+
+			if last_scorer_persist_call.elapsed().as_secs() > SCORER_PERSIST_TIMER {
+				if let Some(ref scorer) = $scorer {
+					log_trace!($logger, "Persisting scorer");
+					if let Err(e) = $persister.persist_scorer(&scorer) {
+						log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
+					}
+				}
+				last_scorer_persist_call = Instant::now();
+			}
+		}
+
+		// After we exit, ensure we persist the ChannelManager one final time - this avoids
+		// some races where users quit while channel updates were in-flight, with
+		// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
+		$persister.persist_manager(&*$channel_manager)?;
+
+		// Persist Scorer on exit
+		if let Some(ref scorer) = $scorer {
+			$persister.persist_scorer(&scorer)?;
+		}
+
+		// Persist NetworkGraph on exit
+		if let Some(network_graph) = $gossip_sync.network_graph() {
+			$persister.persist_graph(network_graph)?;
+		}
+
+		Ok(())
+	} }
+}
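The suspension check in the loop above (time a nominal 100ms wait and treat a gross overshoot as evidence the host slept) can be sketched on its own; the 100ms and 1s figures mirror the macro, everything else is illustrative:

```rust
use std::time::{Duration, Instant};

fn main() {
	let await_start = Instant::now();
	std::thread::sleep(Duration::from_millis(100)); // stand-in for `$await`
	let await_time = await_start.elapsed();

	if await_time > Duration::from_secs(1) {
		// The background processor reacts by disconnecting all peers, since
		// TCP sockets rarely survive a host suspension.
		println!("overslept ({:?}); host was likely suspended", await_time);
	} else {
		println!("normal wakeup after {:?}", await_time);
	}
}
```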
+/// Processes background events in a future.
+///
+/// `sleeper` should return a future which completes in the given amount of time and returns a
+/// boolean indicating whether the background processing should continue. Once `sleeper` returns a
+/// future which outputs false, the loop will exit and this function's future will complete.
+///
+/// See [`BackgroundProcessor::start`] for information on which actions this handles.
+#[cfg(feature = "futures")]
+pub async fn process_events_async<
+	'a,
+	Signer: 'static + Sign,
+	CA: 'static + Deref + Send + Sync,
+	CF: 'static + Deref + Send + Sync,
+	CW: 'static + Deref + Send + Sync,
+	T: 'static + Deref + Send + Sync,
+	K: 'static + Deref + Send + Sync,
+	F: 'static + Deref + Send + Sync,
+	G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
+	L: 'static + Deref + Send + Sync,
+	P: 'static + Deref + Send + Sync,
+	Descriptor: 'static + SocketDescriptor + Send + Sync,
+	CMH: 'static + Deref + Send + Sync,
+	RMH: 'static + Deref + Send + Sync,
+	EH: 'static + EventHandler + Send,
+	PS: 'static + Deref + Send,
+	M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
+	CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
+	PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
+	RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
+	UMH: 'static + Deref + Send + Sync,
+	PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
+	S: 'static + Deref<Target = SC> + Send + Sync,
+	SC: WriteableScore<'a>,
+	SleepFuture: core::future::Future<Output = bool>,
+	Sleeper: Fn(Duration) -> SleepFuture
+>(
+	persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
+	gossip_sync: GossipSync<PGS, RGS, G, CA, L>, peer_manager: PM, logger: L, scorer: Option<S>,
+	sleeper: Sleeper,
+) -> Result<(), std::io::Error>
+where
+	CA::Target: 'static + chain::Access,
+	CF::Target: 'static + chain::Filter,
+	CW::Target: 'static + chain::Watch<Signer>,
+	T::Target: 'static + BroadcasterInterface,
+	K::Target: 'static + KeysInterface<Signer = Signer>,
+	F::Target: 'static + FeeEstimator,
+	L::Target: 'static + Logger,
+	P::Target: 'static + Persist<Signer>,
+	CMH::Target: 'static + ChannelMessageHandler,
+	RMH::Target: 'static + RoutingMessageHandler,
+	UMH::Target: 'static + CustomMessageHandler,
+	PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
+{
+	let mut should_continue = true;
+	define_run_body!(persister, event_handler, chain_monitor, channel_manager,
+		gossip_sync, peer_manager, logger, scorer, should_continue, {
+			select! {
+				_ = channel_manager.get_persistable_update_future().fuse() => true,
+				cont = sleeper(Duration::from_millis(100)).fuse() => {
+					should_continue = cont;
+					false
+				}
+			}
+		})
+}
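One plausible way to supply the `sleeper` argument, assuming a tokio runtime (any executor with a timer works; the surrounding LDK component setup is elided, and the function name here is hypothetical):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

// Only the sleeper's shape matters here: it waits the requested duration,
// then reports whether background processing should continue.
async fn run_background_processing(stop: Arc<AtomicBool>) {
	let sleeper = move |d: Duration| {
		let stop = Arc::clone(&stop);
		async move {
			tokio::time::sleep(d).await;
			!stop.load(Ordering::Acquire) // false => the processing loop exits
		}
	};
	// process_events_async(persister, event_handler, chain_monitor, channel_manager,
	// 	gossip_sync, peer_manager, logger, Some(scorer), sleeper).await.unwrap();
	let _ = sleeper(Duration::from_millis(100)).await;
}
```

Flipping `stop` to true causes shutdown within roughly one 100ms poll interval.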
 
 impl BackgroundProcessor {
 	/// Start a background thread that takes care of responsibilities enumerated in the [top-level
 	/// documentation].
@@ -134,9 +434,9 @@ impl BackgroundProcessor {
 	///   [`ChannelManager`]. See the `lightning-persister` crate for LDK's
 	///   provided implementation.
 	///
-	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk. See
-	/// [`NetworkGraph::write`] for writing out a [`NetworkGraph`]. See the `lightning-persister` crate
-	/// for LDK's provided implementation.
+	/// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if
+	/// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`].
+	/// See the `lightning-persister` crate for LDK's provided implementation.
 	///
 	/// Typically, users should either implement [`Persister::persist_manager`] to never return an
 	/// error or call [`join`] and handle any error that may arise. For the latter case,
@@ -151,9 +451,9 @@ impl BackgroundProcessor {
 	///
 	/// # Rapid Gossip Sync
 	///
-	/// If rapid gossip sync is meant to run at startup, pass an optional [`RapidGossipSync`]
-	/// to `rapid_gossip_sync` to indicate to [`BackgroundProcessor`] not to prune the
-	/// [`NetworkGraph`] instance until the [`RapidGossipSync`] instance completes its first sync.
+	/// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync`
+	/// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance
+	/// until the [`RapidGossipSync`] instance completes its first sync.
 	///
 	/// [top-level documentation]: BackgroundProcessor
 	/// [`join`]: Self::join
@@ -162,8 +462,8 @@ impl BackgroundProcessor {
 	/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
 	/// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
 	/// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
-	/// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
-	/// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable
+	/// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph
+	/// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable
 	pub fn start<
 		'a,
 		Signer: 'static + Sign,
@@ -173,7 +473,7 @@ impl BackgroundProcessor {
 		T: 'static + Deref + Send + Sync,
 		K: 'static + Deref + Send + Sync,
 		F: 'static + Deref + Send + Sync,
-		G: 'static + Deref<Target = NetworkGraph> + Send + Sync,
+		G: 'static + Deref<Target = NetworkGraph<L>> + Send + Sync,
 		L: 'static + Deref + Send + Sync,
 		P: 'static + Deref + Send + Sync,
 		Descriptor: 'static + SocketDescriptor + Send + Sync,
@@ -184,15 +484,14 @@ impl BackgroundProcessor {
 		M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
 		CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
 		PGS: 'static + Deref<Target = P2PGossipSync<G, CA, L>> + Send + Sync,
+		RGS: 'static + Deref<Target = RapidGossipSync<G, L>> + Send,
 		UMH: 'static + Deref + Send + Sync,
 		PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
 		S: 'static + Deref<Target = SC> + Send + Sync,
 		SC: WriteableScore<'a>,
-		RGS: 'static + Deref<Target = RapidGossipSync<G>> + Send
 	>(
 		persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
-		p2p_gossip_sync: Option<PGS>, peer_manager: PM, logger: L, scorer: Option<S>,
-		rapid_gossip_sync: Option<RGS>
+		gossip_sync: GossipSync<PGS, RGS, G, CA, L>, peer_manager: PM, logger: L, scorer: Option<S>,
 	) -> Self
 	where
 		CA::Target: 'static + chain::Access,
@@ -211,137 +510,9 @@ impl BackgroundProcessor {
 		let stop_thread = Arc::new(AtomicBool::new(false));
 		let stop_thread_clone = stop_thread.clone();
 		let handle = thread::spawn(move || -> Result<(), std::io::Error> {
-			let event_handler = DecoratingEventHandler { event_handler, p2p_gossip_sync: p2p_gossip_sync.as_ref().map(|t| t.deref()) };
-
-			log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
-			channel_manager.timer_tick_occurred();
-
-			let mut last_freshness_call = Instant::now();
-			let mut last_ping_call = Instant::now();
-			let mut last_prune_call = Instant::now();
-			let mut last_scorer_persist_call = Instant::now();
-			let mut have_pruned = false;
-
-			loop {
-				channel_manager.process_pending_events(&event_handler);
-				chain_monitor.process_pending_events(&event_handler);
-
-				// Note that the PeerManager::process_events may block on ChannelManager's locks,
-				// hence it comes last here. When the ChannelManager finishes whatever it's doing,
-				// we want to ensure we get into `persist_manager` as quickly as we can, especially
-				// without running the normal event processing above and handing events to users.
-				//
-				// Specifically, on an *extremely* slow machine, we may see ChannelManager start
-				// processing a message effectively at any point during this loop. In order to
-				// minimize the time between such processing completing and persisting the updated
-				// ChannelManager, we want to minimize methods blocking on a ChannelManager
-				// generally, and as a fallback place such blocking only immediately before
-				// persistence.
-				peer_manager.process_events();
-
-				// We wait up to 100ms, but track how long it takes to detect being put to sleep,
-				// see `await_start`'s use below.
-				let await_start = Instant::now();
-				let updates_available =
-					channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
-				let await_time = await_start.elapsed();
-
-				if updates_available {
-					log_trace!(logger, "Persisting ChannelManager...");
-					persister.persist_manager(&*channel_manager)?;
-					log_trace!(logger, "Done persisting ChannelManager.");
-				}
-				// Exit the loop if the background processor was requested to stop.
-				if stop_thread.load(Ordering::Acquire) == true {
-					log_trace!(logger, "Terminating background processor.");
-					break;
-				}
-				if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
-					log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
-					channel_manager.timer_tick_occurred();
-					last_freshness_call = Instant::now();
-				}
-				if await_time > Duration::from_secs(1) {
-					// On various platforms, we may be starved of CPU cycles for several reasons.
-					// E.g. on iOS, if we've been in the background, we will be entirely paused.
-					// Similarly, if we're on a desktop platform and the device has been asleep, we
-					// may not get any cycles.
-					// We detect this by checking if our max-100ms-sleep, above, ran longer than a
-					// full second, at which point we assume sockets may have been killed (they
-					// appear to be at least on some platforms, even if it has only been a second).
-					// Note that we have to take care to not get here just because user event
-					// processing was slow at the top of the loop. For example, the sample client
-					// may call Bitcoin Core RPCs during event handling, which very often takes
-					// more than a handful of seconds to complete, and shouldn't disconnect all our
-					// peers.
-					log_trace!(logger, "100ms sleep took more than a second, disconnecting peers.");
-					peer_manager.disconnect_all_peers();
-					last_ping_call = Instant::now();
-				} else if last_ping_call.elapsed().as_secs() > PING_TIMER {
-					log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
-					peer_manager.timer_tick_occurred();
-					last_ping_call = Instant::now();
-				}
-
-				// Note that we want to run a graph prune once not long after startup before
-				// falling back to our usual hourly prunes. This avoids short-lived clients never
-				// pruning their network graph. We run once 60 seconds after startup before
-				// continuing our normal cadence.
-				if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
-					// The network graph must not be pruned while rapid sync completion is pending
-					log_trace!(logger, "Assessing prunability of network graph");
-					let graph_to_prune = match rapid_gossip_sync.as_ref() {
-						Some(rapid_sync) => {
-							if rapid_sync.is_initial_sync_complete() {
-								Some(rapid_sync.network_graph())
-							} else {
-								None
-							}
-						},
-						None => p2p_gossip_sync.as_ref().map(|sync| sync.network_graph())
-					};
-
-					if let Some(network_graph_reference) = graph_to_prune {
-						network_graph_reference.remove_stale_channels();
-
-						if let Err(e) = persister.persist_graph(network_graph_reference) {
-							log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
-						}
-
-						last_prune_call = Instant::now();
-						have_pruned = true;
-					} else {
-						log_trace!(logger, "Not pruning network graph, either due to pending rapid gossip sync or absence of a prunable graph.");
-					}
-				}
-
-				if last_scorer_persist_call.elapsed().as_secs() > SCORER_PERSIST_TIMER {
-					if let Some(ref scorer) = scorer {
-						log_trace!(logger, "Persisting scorer");
-						if let Err(e) = persister.persist_scorer(&scorer) {
-							log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
-						}
-					}
-					last_scorer_persist_call = Instant::now();
-				}
-			}
-
-			// After we exit, ensure we persist the ChannelManager one final time - this avoids
-			// some races where users quit while channel updates were in-flight, with
-			// ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
-			persister.persist_manager(&*channel_manager)?;
-
-			// Persist Scorer on exit
-			if let Some(ref scorer) = scorer {
-				persister.persist_scorer(&scorer)?;
-			}
-
-			// Persist NetworkGraph on exit
-			if let Some(ref gossip_sync) = p2p_gossip_sync {
-				persister.persist_graph(gossip_sync.network_graph())?;
-			}
-
-			Ok(())
+			define_run_body!(persister, event_handler, chain_monitor, channel_manager,
+				gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
+				channel_manager.await_persistable_update_timeout(Duration::from_millis(100)))
 		});
 		Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
 	}
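For orientation, a hypothetical call site for the revised `start` signature; construction of the LDK components is elided, and `stop()` is the existing `BackgroundProcessor` shutdown path:

```rust
// All arguments are assumed to be built as in the tests below.
let bg_processor = BackgroundProcessor::start(
	persister, event_handler, chain_monitor, channel_manager,
	GossipSync::p2p(gossip_sync), peer_manager, logger, Some(scorer),
);
// ... run the node ...
// stop() flags the loop to exit, joins the thread, and surfaces any I/O
// error from the final ChannelManager/NetworkGraph/scorer persistence passes.
if let Err(e) = bg_processor.stop() {
	eprintln!("background processor shut down with error: {}", e);
}
```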
@@ -408,7 +579,7 @@ mod tests {
 	use lightning::ln::features::{ChannelFeatures, InitFeatures};
 	use lightning::ln::msgs::{ChannelMessageHandler, Init};
 	use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
-	use lightning::routing::network_graph::{NetworkGraph, P2PGossipSync};
+	use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
 	use lightning::util::config::UserConfig;
 	use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
 	use lightning::util::ser::Writeable;
@@ -424,7 +595,7 @@ mod tests {
 	use std::time::Duration;
 	use lightning::routing::scoring::{FixedPenaltyScorer};
 	use lightning_rapid_gossip_sync::RapidGossipSync;
-	use super::{BackgroundProcessor, FRESHNESS_TIMER};
+	use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER};
 
 	const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
 
@@ -440,18 +611,35 @@ mod tests {
 	type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
 
+	type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
+	type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
+
 	struct Node {
 		node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
-		p2p_gossip_sync: Option<Arc<P2PGossipSync<Arc<NetworkGraph>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>>,
+		p2p_gossip_sync: PGS,
+		rapid_gossip_sync: RGS,
 		peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
 		chain_monitor: Arc<ChainMonitor>,
 		persister: Arc<FilesystemPersister>,
 		tx_broadcaster: Arc<test_utils::TestBroadcaster>,
-		network_graph: Arc<NetworkGraph>,
+		network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
 		logger: Arc<test_utils::TestLogger>,
 		best_block: BestBlock,
 		scorer: Arc<Mutex<FixedPenaltyScorer>>,
-		rapid_gossip_sync: Option<Arc<RapidGossipSync<Arc<NetworkGraph>>>>
+	}
+
+	impl Node {
+		fn p2p_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
+			GossipSync::P2P(self.p2p_gossip_sync.clone())
+		}
+
+		fn rapid_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
+			GossipSync::Rapid(self.rapid_gossip_sync.clone())
+		}
+
+		fn no_gossip_sync(&self) -> GossipSync<PGS, RGS, Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> {
+			GossipSync::None
+		}
 	}
 
 	impl Drop for Node {
@@ -546,13 +734,13 @@ mod tests {
 			let best_block = BestBlock::from_genesis(network);
 			let params = ChainParameters { network, best_block };
 			let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params));
-			let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash()));
-			let p2p_gossip_sync = Some(Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())));
+			let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash(), logger.clone()));
+			let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()));
+			let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
 			let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
 			let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
 			let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
-			let rapid_gossip_sync = None;
-			let node = Node { node: manager, p2p_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer, rapid_gossip_sync };
+			let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
 			nodes.push(node);
 		}
@@ -650,7 +838,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		macro_rules! check_persisted_data {
 			($node: expr, $filepath: expr) => {
@@ -685,7 +873,7 @@ mod tests {
 		}
 
 		// Force-close the channel.
-		nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
 
 		// Check that the force-close updates are persisted.
 		check_persisted_data!(nodes[0].node, filepath.clone());
@@ -695,10 +883,7 @@ mod tests {
 
 		// Check network graph is persisted
 		let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
-		if let Some(ref handler) = nodes[0].p2p_gossip_sync {
-			let network_graph = handler.network_graph();
-			check_persisted_data!(network_graph, filepath.clone());
-		}
+		check_persisted_data!(nodes[0].network_graph, filepath.clone());
 
 		// Check scorer is persisted
 		let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
@@ -715,7 +900,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
 			let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
@@ -738,7 +923,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 		match bg_processor.join() {
 			Ok(_) => panic!("Expected error persisting manager"),
 			Err(e) => {
@@ -755,7 +940,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		match bg_processor.stop() {
 			Ok(_) => panic!("Expected error persisting network graph"),
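As the two tests above show, `join()` and `stop()` both return the worker thread's `Result`; `join()` merely joins (useful when the loop has already exited on its own, for example after a `persist_manager` failure), while `stop()` requests the exit first. A sketch of handling it at a call site (hypothetical variable names):

```rust
match bg_processor.join() {
	Ok(()) => { /* loop exited cleanly */ },
	Err(e) => eprintln!("background processing failed: {}", e),
}
```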
@@ -773,7 +958,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		match bg_processor.stop() {
 			Ok(_) => panic!("Expected error persisting scorer"),
@@ -796,7 +981,7 @@ mod tests {
 		let event_handler = move |event: &Event| {
 			sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
 		};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		// Open a channel and check that the FundingGenerationReady event was handled.
 		begin_open_channel!(nodes[0], nodes[1], channel_value);
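The tests pass plain closures as event handlers; the channel-forwarding pattern used here can be shown standalone (the `Event` enum below is a stand-in for `lightning::util::events::Event`, for illustration only):

```rust
use std::sync::mpsc;

#[derive(Clone, Debug)]
enum Event { FundingGenerationReady }

fn main() {
	let (sender, receiver) = mpsc::sync_channel(1);
	// Any Fn(&Event) works as a handler; this one forwards events so another
	// thread (here, the test) can assert on them.
	let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
	event_handler(&Event::FundingGenerationReady);
	println!("handled: {:?}", receiver.recv().unwrap());
}
```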
@@ -821,10 +1006,10 @@ mod tests {
 		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 		let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
 		let persister = Arc::new(Persister::new(data_dir));
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		// Force close the channel and check that the SpendableOutputs event was handled.
-		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
 		let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
 		confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
 		let event = receiver
@@ -845,7 +1030,7 @@ mod tests {
 		let data_dir = nodes[0].persister.get_data_dir();
 		let persister = Arc::new(Persister::new(data_dir));
 		let event_handler = |_: &_| {};
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
@@ -865,7 +1050,6 @@ mod tests {
 		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 		let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender));
 		let network_graph = nodes[0].network_graph.clone();
-		let rapid_sync = Arc::new(RapidGossipSync::new(network_graph.clone()));
 		let features = ChannelFeatures::empty();
 		network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
 			.expect("Failed to update channel from partial announcement");
@@ -874,7 +1058,7 @@ mod tests {
 		assert_eq!(network_graph.read_only().channels().len(), 1);
 
 		let event_handler = |_: &_| {};
-		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), Some(rapid_sync.clone()));
+		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
@@ -901,7 +1085,7 @@ mod tests {
 			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
 			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
 		];
-		rapid_sync.update_network_graph(&initialization_input[..]).unwrap();
+		nodes[0].rapid_gossip_sync.update_network_graph(&initialization_input[..]).unwrap();
 
 		// this should have added two channels
 		assert_eq!(network_graph.read_only().channels().len(), 3);
@@ -928,7 +1112,7 @@ mod tests {
 		let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
 		let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2)));
 		let event_handler = Arc::clone(&invoice_payer);
-		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone());
+		let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
 		assert!(bg_processor.stop().is_ok());
 	}
 }