X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-background-processor%2Fsrc%2Flib.rs;h=c4e8ea5c2b35ade6024df4821bb0ca17a45b2362;hb=a19cb0e969113500a26c7b902cbc381fea32ca46;hp=0fab3e61a4af982fb49f14d89865133af29dad45;hpb=ab20284e2673e946ebdffcd5be7388de917a168f;p=rust-lightning diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 0fab3e61..c4e8ea5c 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -18,8 +18,8 @@ use lightning::chain::keysinterface::{Sign, KeysInterface}; use lightning::ln::channelmanager::ChannelManager; use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor}; -use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler}; -use lightning::routing::scoring::WriteableScore; +use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; +use lightning::routing::scoring::{Score, MultiThreadedLockableScore}; use lightning::util::events::{Event, EventHandler, EventsProvider}; use lightning::util::logger::Logger; use lightning::util::persist::Persister; @@ -40,8 +40,8 @@ use std::ops::Deref; /// [`ChannelManager`] persistence should be done in the background. /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`] /// at the appropriate intervals. -/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to -/// [`BackgroundProcessor::start`]). +/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`GossipSync`] with a [`NetworkGraph`] +/// is provided to [`BackgroundProcessor::start`]). /// /// It will also call [`PeerManager::process_events`] periodically though this shouldn't be relied /// upon as doing so may result in high latency. @@ -79,7 +79,7 @@ const PING_TIMER: u64 = 1; /// Prune the network graph of stale entries hourly. const NETWORK_PRUNE_TIMER: u64 = 60 * 60; -#[cfg(all(not(test), debug_assertions))] +#[cfg(not(test))] const SCORER_PERSIST_TIMER: u64 = 30; #[cfg(test)] const SCORER_PERSIST_TIMER: u64 = 1; @@ -89,31 +89,131 @@ const FIRST_NETWORK_PRUNE_TIMER: u64 = 60; #[cfg(test)] const FIRST_NETWORK_PRUNE_TIMER: u64 = 1; +/// Either [`P2PGossipSync`] or [`RapidGossipSync`]. +pub enum GossipSync< + P: Deref>, + R: Deref>, + G: Deref>, + A: Deref, + L: Deref, +> +where A::Target: chain::Access, L::Target: Logger { + /// Gossip sync via the lightning peer-to-peer network as defined by BOLT 7. + P2P(P), + /// Rapid gossip sync from a trusted server. + Rapid(R), + /// No gossip sync. 
+ None, +} + +impl< + P: Deref>, + R: Deref>, + G: Deref>, + A: Deref, + L: Deref, +> GossipSync +where A::Target: chain::Access, L::Target: Logger { + fn network_graph(&self) -> Option<&G> { + match self { + GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()), + GossipSync::Rapid(gossip_sync) => Some(gossip_sync.network_graph()), + GossipSync::None => None, + } + } + + fn prunable_network_graph(&self) -> Option<&G> { + match self { + GossipSync::P2P(gossip_sync) => Some(gossip_sync.network_graph()), + GossipSync::Rapid(gossip_sync) => { + if gossip_sync.is_initial_sync_complete() { + Some(gossip_sync.network_graph()) + } else { + None + } + }, + GossipSync::None => None, + } + } +} + +/// (C-not exported) as the bindings concretize everything and have constructors for us +impl>, G: Deref>, A: Deref, L: Deref> + GossipSync, G, A, L> +where + A::Target: chain::Access, + L::Target: Logger, +{ + /// Initializes a new [`GossipSync::P2P`] variant. + pub fn p2p(gossip_sync: P) -> Self { + GossipSync::P2P(gossip_sync) + } +} + +/// (C-not exported) as the bindings concretize everything and have constructors for us +impl<'a, R: Deref>, G: Deref>, L: Deref> + GossipSync< + &P2PGossipSync, + R, + G, + &'a (dyn chain::Access + Send + Sync), + L, + > +where + L::Target: Logger, +{ + /// Initializes a new [`GossipSync::Rapid`] variant. + pub fn rapid(gossip_sync: R) -> Self { + GossipSync::Rapid(gossip_sync) + } +} + +/// (C-not exported) as the bindings concretize everything and have constructors for us +impl<'a, L: Deref> + GossipSync< + &P2PGossipSync<&'a NetworkGraph, &'a (dyn chain::Access + Send + Sync), L>, + &RapidGossipSync<&'a NetworkGraph, L>, + &'a NetworkGraph, + &'a (dyn chain::Access + Send + Sync), + L, + > +where + L::Target: Logger, +{ + /// Initializes a new [`GossipSync::None`] variant. + pub fn none() -> Self { + GossipSync::None + } +} /// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s. struct DecoratingEventHandler< + 'a, E: EventHandler, - N: Deref>, - G: Deref, + PGS: Deref>, + RGS: Deref>, + G: Deref>, A: Deref, L: Deref, > where A::Target: chain::Access, L::Target: Logger { event_handler: E, - net_graph_msg_handler: Option, + gossip_sync: &'a GossipSync, } impl< + 'a, E: EventHandler, - N: Deref>, - G: Deref, + PGS: Deref>, + RGS: Deref>, + G: Deref>, A: Deref, L: Deref, -> EventHandler for DecoratingEventHandler +> EventHandler for DecoratingEventHandler<'a, E, PGS, RGS, G, A, L> where A::Target: chain::Access, L::Target: Logger { fn handle_event(&self, event: &Event) { - if let Some(event_handler) = &self.net_graph_msg_handler { - event_handler.handle_event(event); + if let Some(network_graph) = self.gossip_sync.network_graph() { + network_graph.handle_event(event); } self.event_handler.handle_event(event); } @@ -134,9 +234,9 @@ impl BackgroundProcessor { /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's /// provided implementation. /// - /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk. See - /// [`NetworkGraph::write`] for writing out a [`NetworkGraph`]. See the `lightning-persister` crate - /// for LDK's provided implementation. + /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk, if + /// [`GossipSync`] is supplied. See [`NetworkGraph::write`] for writing out a [`NetworkGraph`]. + /// See the `lightning-persister` crate for LDK's provided implementation. 
/// /// Typically, users should either implement [`Persister::persist_manager`] to never return an /// error or call [`join`] and handle any error that may arise. For the latter case, @@ -147,13 +247,13 @@ impl BackgroundProcessor { /// `event_handler` is responsible for handling events that users should be notified of (e.g., /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common /// functionality implemented by other handlers. - /// * [`NetGraphMsgHandler`] if given will update the [`NetworkGraph`] based on payment failures. + /// * [`P2PGossipSync`] if given will update the [`NetworkGraph`] based on payment failures. /// /// # Rapid Gossip Sync /// - /// If rapid gossip sync is meant to run at startup, pass an optional [`RapidGossipSync`] - /// to `rapid_gossip_sync` to indicate to [`BackgroundProcessor`] not to prune the - /// [`NetworkGraph`] instance until the [`RapidGossipSync`] instance completes its first sync. + /// If rapid gossip sync is meant to run at startup, pass [`RapidGossipSync`] via `gossip_sync` + /// to indicate that the [`BackgroundProcessor`] should not prune the [`NetworkGraph`] instance + /// until the [`RapidGossipSync`] instance completes its first sync. /// /// [top-level documentation]: BackgroundProcessor /// [`join`]: Self::join @@ -162,8 +262,8 @@ impl BackgroundProcessor { /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph - /// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph - /// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable + /// [`NetworkGraph`]: lightning::routing::gossip::NetworkGraph + /// [`NetworkGraph::write`]: lightning::routing::gossip::NetworkGraph#impl-Writeable pub fn start< 'a, Signer: 'static + Sign, @@ -173,7 +273,7 @@ impl BackgroundProcessor { T: 'static + Deref + Send + Sync, K: 'static + Deref + Send + Sync, F: 'static + Deref + Send + Sync, - G: 'static + Deref + Send + Sync, + G: 'static + Deref> + Send + Sync, L: 'static + Deref + Send + Sync, P: 'static + Deref + Send + Sync, Descriptor: 'static + SocketDescriptor + Send + Sync, @@ -183,16 +283,14 @@ impl BackgroundProcessor { PS: 'static + Deref + Send, M: 'static + Deref> + Send + Sync, CM: 'static + Deref> + Send + Sync, - NG: 'static + Deref> + Send + Sync, + PGS: 'static + Deref> + Send + Sync, + RGS: 'static + Deref> + Send, UMH: 'static + Deref + Send + Sync, PM: 'static + Deref> + Send + Sync, - S: 'static + Deref + Send + Sync, - SC: WriteableScore<'a>, - RGS: 'static + Deref> + Send + SC: Score + Send, >( persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM, - net_graph_msg_handler: Option, peer_manager: PM, logger: L, scorer: Option, - rapid_gossip_sync: Option + gossip_sync: GossipSync, peer_manager: PM, logger: L, scorer: Option<&'static MultiThreadedLockableScore>, ) -> Self where CA::Target: 'static + chain::Access, @@ -211,7 +309,10 @@ impl BackgroundProcessor { let stop_thread = Arc::new(AtomicBool::new(false)); let stop_thread_clone = stop_thread.clone(); let handle = thread::spawn(move || -> Result<(), std::io::Error> { - let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) }; + let event_handler = DecoratingEventHandler { + event_handler, + gossip_sync: 
&gossip_sync, + }; log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); channel_manager.timer_tick_occurred(); @@ -290,21 +391,10 @@ impl BackgroundProcessor { if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } { // The network graph must not be pruned while rapid sync completion is pending log_trace!(logger, "Assessing prunability of network graph"); - let graph_to_prune = match rapid_gossip_sync.as_ref() { - Some(rapid_sync) => { - if rapid_sync.is_initial_sync_complete() { - Some(rapid_sync.network_graph()) - } else { - None - } - }, - None => net_graph_msg_handler.as_ref().map(|handler| handler.network_graph()) - }; + if let Some(network_graph) = gossip_sync.prunable_network_graph() { + network_graph.remove_stale_channels(); - if let Some(network_graph_reference) = graph_to_prune { - network_graph_reference.remove_stale_channels(); - - if let Err(e) = persister.persist_graph(network_graph_reference) { + if let Err(e) = persister.persist_graph(network_graph) { log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e) } @@ -337,8 +427,8 @@ impl BackgroundProcessor { } // Persist NetworkGraph on exit - if let Some(ref handler) = net_graph_msg_handler { - persister.persist_graph(handler.network_graph())?; + if let Some(network_graph) = gossip_sync.network_graph() { + persister.persist_graph(network_graph)?; } Ok(()) @@ -408,7 +498,7 @@ mod tests { use lightning::ln::features::{ChannelFeatures, InitFeatures}; use lightning::ln::msgs::{ChannelMessageHandler, Init}; use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler}; - use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler}; + use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::util::config::UserConfig; use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent}; use lightning::util::ser::Writeable; @@ -424,7 +514,7 @@ mod tests { use std::time::Duration; use lightning::routing::scoring::{FixedPenaltyScorer}; use lightning_rapid_gossip_sync::RapidGossipSync; - use super::{BackgroundProcessor, FRESHNESS_TIMER}; + use super::{BackgroundProcessor, GossipSync, FRESHNESS_TIMER}; const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER; @@ -440,18 +530,35 @@ mod tests { type ChainMonitor = chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc>; + type PGS = Arc>>, Arc, Arc>>; + type RGS = Arc>>, Arc>>; + struct Node { node: Arc>, - net_graph_msg_handler: Option, Arc, Arc>>>, + p2p_gossip_sync: PGS, + rapid_gossip_sync: RGS, peer_manager: Arc, Arc, Arc, IgnoringMessageHandler>>, chain_monitor: Arc, persister: Arc, tx_broadcaster: Arc, - network_graph: Arc, + network_graph: Arc>>, logger: Arc, best_block: BestBlock, scorer: Arc>, - rapid_gossip_sync: Option>>> + } + + impl Node { + fn p2p_gossip_sync(&self) -> GossipSync>>, Arc, Arc> { + GossipSync::P2P(self.p2p_gossip_sync.clone()) + } + + fn rapid_gossip_sync(&self) -> GossipSync>>, Arc, Arc> { + GossipSync::Rapid(self.rapid_gossip_sync.clone()) + } + + fn no_gossip_sync(&self) -> GossipSync>>, Arc, Arc> { + GossipSync::None + } } impl Drop for Node { @@ -546,13 +653,13 @@ mod tests { let best_block = BestBlock::from_genesis(network); let params = ChainParameters { network, best_block }; let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), 
params)); - let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash())); - let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone()))); + let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash(), logger.clone())); + let p2p_gossip_sync = Arc::new(P2PGossipSync::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())); + let rapid_gossip_sync = Arc::new(RapidGossipSync::new(network_graph.clone())); let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )}; let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{})); let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0))); - let rapid_gossip_sync = None; - let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer, rapid_gossip_sync }; + let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer }; nodes.push(node); } @@ -650,7 +757,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir)); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); macro_rules! check_persisted_data { ($node: expr, $filepath: expr) => { @@ -685,7 +792,7 @@ mod tests { } // Force-close the channel. - nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap(); // Check that the force-close updates are persisted. 
check_persisted_data!(nodes[0].node, filepath.clone()); @@ -695,10 +802,7 @@ mod tests { // Check network graph is persisted let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string()); - if let Some(ref handler) = nodes[0].net_graph_msg_handler { - let network_graph = handler.network_graph(); - check_persisted_data!(network_graph, filepath.clone()); - } + check_persisted_data!(nodes[0].network_graph, filepath.clone()); // Check scorer is persisted let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string()); @@ -715,7 +819,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir)); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); loop { let log_entries = nodes[0].logger.lines.lock().unwrap(); let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string(); @@ -738,7 +842,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test")); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); match bg_processor.join() { Ok(_) => panic!("Expected error persisting manager"), Err(e) => { @@ -755,7 +859,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test")); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); match bg_processor.stop() { Ok(_) => panic!("Expected error persisting network graph"), @@ -773,7 +877,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test")); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), 
nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); match bg_processor.stop() { Ok(_) => panic!("Expected error persisting scorer"), @@ -796,7 +900,7 @@ mod tests { let event_handler = move |event: &Event| { sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap(); }; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); // Open a channel and check that the FundingGenerationReady event was handled. begin_open_channel!(nodes[0], nodes[1], channel_value); @@ -821,10 +925,10 @@ mod tests { let (sender, receiver) = std::sync::mpsc::sync_channel(1); let event_handler = move |event: &Event| sender.send(event.clone()).unwrap(); let persister = Arc::new(Persister::new(data_dir)); - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); // Force close the channel and check that the SpendableOutputs event was handled. 
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap(); confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32); let event = receiver @@ -845,7 +949,7 @@ mod tests { let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir)); let event_handler = |_: &_| {}; - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); loop { let log_entries = nodes[0].logger.lines.lock().unwrap(); @@ -865,7 +969,6 @@ mod tests { let (sender, receiver) = std::sync::mpsc::sync_channel(1); let persister = Arc::new(Persister::new(data_dir.clone()).with_graph_persistence_notifier(sender)); let network_graph = nodes[0].network_graph.clone(); - let rapid_sync = Arc::new(RapidGossipSync::new(network_graph.clone())); let features = ChannelFeatures::empty(); network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id()) .expect("Failed to update channel from partial announcement"); @@ -874,7 +977,7 @@ mod tests { assert_eq!(network_graph.read_only().channels().len(), 1); let event_handler = |_: &_| {}; - let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), Some(rapid_sync.clone())); + let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); loop { let log_entries = nodes[0].logger.lines.lock().unwrap(); @@ -901,7 +1004,7 @@ mod tests { 0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0, 0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192, ]; - rapid_sync.update_network_graph(&initialization_input[..]).unwrap(); + nodes[0].rapid_gossip_sync.update_network_graph(&initialization_input[..]).unwrap(); // this should have added two channels assert_eq!(network_graph.read_only().channels().len(), 3); @@ -928,7 +1031,7 @@ mod tests { let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes); let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, Retry::Attempts(2))); let event_handler = Arc::clone(&invoice_payer); - let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), 
nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()), nodes[0].rapid_gossip_sync.clone()); + let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); assert!(bg_processor.stop().is_ok()); } }
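
For orientation, a minimal caller-side sketch of the reworked `BackgroundProcessor::start` call introduced by this diff. It mirrors the test invocations above and is illustrative only: the `persister`, `event_handler`, `chain_monitor`, `channel_manager`, `p2p_gossip_sync`, `rapid_gossip_sync`, `peer_manager`, `logger`, and `scorer` bindings are assumed to have been constructed as in `create_nodes` and are not defined here.

	// Pick exactly one gossip sync variant. Only a variant that carries a
	// `NetworkGraph` causes the graph to be pruned and persisted.
	let gossip_sync = GossipSync::p2p(p2p_gossip_sync.clone());
	// let gossip_sync = GossipSync::rapid(rapid_gossip_sync.clone());
	// let gossip_sync = GossipSync::none();

	let bg_processor = BackgroundProcessor::start(
		persister,
		event_handler,
		chain_monitor.clone(),
		channel_manager.clone(),
		gossip_sync,
		peer_manager.clone(),
		logger.clone(),
		Some(scorer.clone()),
	);

	// ... node runs; the background thread drives timer ticks, event
	// processing, persistence, and network graph pruning ...

	// `stop` joins the background thread and surfaces any persistence error.
	bg_processor.stop().unwrap();

With `GossipSync::Rapid`, pruning via `NetworkGraph::remove_stale_channels` is deferred until `RapidGossipSync::is_initial_sync_complete` returns true, while `GossipSync::None` skips graph pruning and graph persistence entirely.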