//! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
//! running properly, and (2) either can or should be run in the background. See docs for
//! [`BackgroundProcessor`] for more details on the nitty-gritty.

#![deny(broken_intra_doc_links)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#[macro_use] extern crate lightning;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
use lightning::routing::scoring::WriteableScore;
use lightning::util::events::{Event, EventHandler, EventsProvider};
use lightning::util::logger::Logger;
use lightning::util::persist::Persister;

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use std::ops::Deref;

/// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
/// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
/// responsibilities are:
/// * Processing [`Event`]s with a user-provided [`EventHandler`].
/// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
///   writing it to disk/backups by invoking the callback given to it at startup.
///   [`ChannelManager`] persistence should be done in the background.
/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
///   at the appropriate intervals.
/// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to
///   [`BackgroundProcessor::start`]).
///
/// It will also call [`PeerManager::process_events`] periodically, though this shouldn't be
/// relied upon as doing so may result in high latency.
///
/// # Note
///
/// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
/// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
/// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
/// unilateral chain closure fees are at risk.
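///
/// # Example
///
/// A minimal sketch of driving the processor from an application, assuming the node's components
/// (`persister`, `event_handler`, `chain_monitor`, `channel_manager`, `net_graph_msg_handler`,
/// `peer_manager`, `logger`, and `scorer`) have already been constructed elsewhere; they are
/// placeholders here, not items provided by this crate.
///
/// ```ignore
/// let background_processor = BackgroundProcessor::start(
///     persister, event_handler, chain_monitor, channel_manager,
///     Some(net_graph_msg_handler), peer_manager, logger, Some(scorer),
/// );
///
/// // ... run the rest of the node ...
///
/// // On shutdown, stop the background thread and surface any persistence error.
/// background_processor.stop().expect("ChannelManager persistence failed");
/// ```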
///
/// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
/// [`Event`]: lightning::util::events::Event
#[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
pub struct BackgroundProcessor {
    stop_thread: Arc<AtomicBool>,
    thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}

#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;

#[cfg(all(not(test), not(debug_assertions)))]
const PING_TIMER: u64 = 10;
/// Signature operations take a lot longer without compiler optimisations.
/// Increasing the ping timer allows for this, but slower devices will be disconnected if the
/// timeout is reached.
#[cfg(all(not(test), debug_assertions))]
const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;

/// Prune the network graph of stale entries hourly.
const NETWORK_PRUNE_TIMER: u64 = 60 * 60;

#[cfg(not(test))]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 60;
#[cfg(test)]
const FIRST_NETWORK_PRUNE_TIMER: u64 = 1;

/// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
struct DecoratingEventHandler<
    E: EventHandler,
    N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
    G: Deref<Target = NetworkGraph>,
    A: Deref,
    L: Deref,
>
where A::Target: chain::Access, L::Target: Logger {
    event_handler: E,
    net_graph_msg_handler: Option<N>,
}

impl<
    E: EventHandler,
    N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
    G: Deref<Target = NetworkGraph>,
    A: Deref,
    L: Deref,
> EventHandler for DecoratingEventHandler<E, N, G, A, L>
where A::Target: chain::Access, L::Target: Logger {
    fn handle_event(&self, event: &Event) {
        if let Some(event_handler) = &self.net_graph_msg_handler {
            event_handler.handle_event(event);
        }
        self.event_handler.handle_event(event);
    }
}

impl BackgroundProcessor {
    /// Start a background thread that takes care of responsibilities enumerated in the [top-level
    /// documentation].
    ///
    /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
    /// [`Persister::persist_manager`] returns an error. In case of an error, the error can be
    /// retrieved by calling either [`join`] or [`stop`].
    ///
    /// # Data Persistence
    ///
    /// [`Persister::persist_manager`] is responsible for writing out the [`ChannelManager`] to disk, and/or
    /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
    /// [`ChannelManager`]. See the `lightning-persister` crate for LDK's
    /// provided implementation.
    ///
    /// [`Persister::persist_graph`] is responsible for writing out the [`NetworkGraph`] to disk. See
    /// [`NetworkGraph::write`] for writing out a [`NetworkGraph`]. See the `lightning-persister` crate
    /// for LDK's provided implementation.
    ///
    /// Typically, users should either implement [`Persister::persist_manager`] to never return an
    /// error or call [`join`] and handle any error that may arise. For the latter case,
    /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
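    ///
    /// For example, a caller that opts to handle persistence errors itself might drive the
    /// processor in a loop along the lines of the following sketch, where `start_processor()` is
    /// a hypothetical application helper that rebuilds the processor from the node's components:
    ///
    /// ```ignore
    /// loop {
    ///     let background_processor = start_processor();
    ///     match background_processor.join() {
    ///         Ok(()) => break,
    ///         Err(e) => {
    ///             // Persisting the ChannelManager failed; address `e` (e.g. free up disk
    ///             // space), then loop around and restart the processor via `start`.
    ///         },
    ///     }
    /// }
    /// ```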
    ///
    /// # Event Handling
    ///
    /// `event_handler` is responsible for handling events that users should be notified of (e.g.,
    /// payment failed). [`BackgroundProcessor`] may decorate the given [`EventHandler`] with common
    /// functionality implemented by other handlers.
    /// * [`NetGraphMsgHandler`], if given, will update the [`NetworkGraph`] based on payment failures.
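    ///
    /// A bare-bones `event_handler` can be a closure over [`Event`]; the sketch below is
    /// illustrative only, and a real application must handle every event variant it cares about:
    ///
    /// ```ignore
    /// let event_handler = |event: &Event| match event {
    ///     Event::SpendableOutputs { .. } => {
    ///         // Sweep the on-chain outputs that LDK has handed back to the application.
    ///     },
    ///     _ => {
    ///         // Handle the remaining variants as appropriate.
    ///     },
    /// };
    /// ```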
    ///
    /// [top-level documentation]: BackgroundProcessor
    /// [`join`]: Self::join
    /// [`stop`]: Self::stop
    /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
    /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
    /// [`Persister::persist_manager`]: lightning::util::persist::Persister::persist_manager
    /// [`Persister::persist_graph`]: lightning::util::persist::Persister::persist_graph
    /// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
    /// [`NetworkGraph::write`]: lightning::routing::network_graph::NetworkGraph#impl-Writeable
    pub fn start<
        'a,
        Signer: 'static + Sign,
        CA: 'static + Deref + Send + Sync,
        CF: 'static + Deref + Send + Sync,
        CW: 'static + Deref + Send + Sync,
        T: 'static + Deref + Send + Sync,
        K: 'static + Deref + Send + Sync,
        F: 'static + Deref + Send + Sync,
        G: 'static + Deref<Target = NetworkGraph> + Send + Sync,
        L: 'static + Deref + Send + Sync,
        P: 'static + Deref + Send + Sync,
        Descriptor: 'static + SocketDescriptor + Send + Sync,
        CMH: 'static + Deref + Send + Sync,
        RMH: 'static + Deref + Send + Sync,
        EH: 'static + EventHandler + Send,
        PS: 'static + Deref + Send,
        M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
        CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
        NG: 'static + Deref<Target = NetGraphMsgHandler<G, CA, L>> + Send + Sync,
        UMH: 'static + Deref + Send + Sync,
        PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
        S: 'static + Deref<Target = SC> + Send + Sync,
        SC: WriteableScore<'a>,
    >(
        persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM,
        net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L, scorer: Option<S>
    ) -> Self
    where
        CA::Target: 'static + chain::Access,
        CF::Target: 'static + chain::Filter,
        CW::Target: 'static + chain::Watch<Signer>,
        T::Target: 'static + BroadcasterInterface,
        K::Target: 'static + KeysInterface<Signer = Signer>,
        F::Target: 'static + FeeEstimator,
        L::Target: 'static + Logger,
        P::Target: 'static + Persist<Signer>,
        CMH::Target: 'static + ChannelMessageHandler,
        RMH::Target: 'static + RoutingMessageHandler,
        UMH::Target: 'static + CustomMessageHandler,
        PS::Target: 'static + Persister<'a, Signer, CW, T, K, F, L, SC>,
    {
        let stop_thread = Arc::new(AtomicBool::new(false));
        let stop_thread_clone = stop_thread.clone();
        let handle = thread::spawn(move || -> Result<(), std::io::Error> {
            let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) };

            log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
            channel_manager.timer_tick_occurred();

            let mut last_freshness_call = Instant::now();
            let mut last_ping_call = Instant::now();
            let mut last_prune_call = Instant::now();
            let mut have_pruned = false;

            loop {
                channel_manager.process_pending_events(&event_handler);
                chain_monitor.process_pending_events(&event_handler);

                // Note that the PeerManager::process_events may block on ChannelManager's locks,
                // hence it comes last here. When the ChannelManager finishes whatever it's doing,
                // we want to ensure we get into `persist_manager` as quickly as we can, especially
                // without running the normal event processing above and handing events to users.
                //
                // Specifically, on an *extremely* slow machine, we may see ChannelManager start
                // processing a message effectively at any point during this loop. In order to
                // minimize the time between such processing completing and persisting the updated
                // ChannelManager, we want to minimize methods blocking on a ChannelManager
                // generally, and as a fallback place such blocking only immediately before
                // persisting.
                peer_manager.process_events();

                // We wait up to 100ms, but track how long it takes to detect being put to sleep,
                // see `await_start`'s use below.
                let await_start = Instant::now();
                let updates_available =
                    channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
                let await_time = await_start.elapsed();

                if updates_available {
                    log_trace!(logger, "Persisting ChannelManager...");
                    persister.persist_manager(&*channel_manager)?;
                    log_trace!(logger, "Done persisting ChannelManager.");
                }

                // Exit the loop if the background processor was requested to stop.
                if stop_thread.load(Ordering::Acquire) {
                    log_trace!(logger, "Terminating background processor.");
                    break;
                }
                if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
                    log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
                    channel_manager.timer_tick_occurred();
                    last_freshness_call = Instant::now();
                }
                if await_time > Duration::from_secs(1) {
                    // On various platforms, we may be starved of CPU cycles for several reasons.
                    // E.g. on iOS, if we've been in the background, we will be entirely paused.
                    // Similarly, if we're on a desktop platform and the device has been asleep, we
                    // may not get any cycles.
                    // We detect this by checking if our max-100ms-sleep, above, ran longer than a
                    // full second, at which point we assume sockets may have been killed (they
                    // appear to be at least on some platforms, even if it has only been a second).
                    // Note that we have to take care to not get here just because user event
                    // processing was slow at the top of the loop. For example, the sample client
                    // may call Bitcoin Core RPCs during event handling, which very often takes
                    // more than a handful of seconds to complete, and shouldn't disconnect all our
                    // peers.
                    log_trace!(logger, "100ms sleep took more than a second, disconnecting peers.");
                    peer_manager.disconnect_all_peers();
                    last_ping_call = Instant::now();
                } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
                    log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
                    peer_manager.timer_tick_occurred();
                    last_ping_call = Instant::now();
                }

                // Note that we want to run a graph prune once not long after startup before
                // falling back to our usual hourly prunes. This avoids short-lived clients never
                // pruning their network graph. We run once 60 seconds after startup before
                // continuing our normal cadence.
                if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER } {
                    if let Some(ref handler) = net_graph_msg_handler {
                        log_trace!(logger, "Pruning network graph of stale entries");
                        handler.network_graph().remove_stale_channels();
                        if let Err(e) = persister.persist_graph(handler.network_graph()) {
                            log_error!(logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
                        }
                        last_prune_call = Instant::now();
                        have_pruned = true;
                    }
                    if let Some(ref scorer) = scorer {
                        log_trace!(logger, "Persisting scorer");
                        if let Err(e) = persister.persist_scorer(&scorer) {
                            log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
                        }
                    }
                }
            }

            // After we exit, ensure we persist the ChannelManager one final time - this avoids
            // some races where users quit while channel updates were in-flight, with
            // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
            persister.persist_manager(&*channel_manager)?;

            // Persist Scorer on exit
            if let Some(ref scorer) = scorer {
                persister.persist_scorer(&scorer)?;
            }

            // Persist NetworkGraph on exit
            if let Some(ref handler) = net_graph_msg_handler {
                persister.persist_graph(handler.network_graph())?;
            }

            Ok(())
        });
        Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
    }

    /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
    /// [`ChannelManager`].
    ///
    /// # Panics
    ///
    /// This function panics if the background thread has panicked such as while persisting or
    /// handling events.
    ///
    /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
    pub fn join(mut self) -> Result<(), std::io::Error> {
        assert!(self.thread_handle.is_some());
        self.join_thread()
    }

    /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
    /// [`ChannelManager`].
    ///
    /// # Panics
    ///
    /// This function panics if the background thread has panicked such as while persisting or
    /// handling events.
    ///
    /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
    pub fn stop(mut self) -> Result<(), std::io::Error> {
        assert!(self.thread_handle.is_some());
        self.stop_and_join_thread()
    }

    fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
        self.stop_thread.store(true, Ordering::Release);
        self.join_thread()
    }

    fn join_thread(&mut self) -> Result<(), std::io::Error> {
        match self.thread_handle.take() {
            Some(handle) => handle.join().unwrap(),
            None => Ok(()),
        }
    }
}

impl Drop for BackgroundProcessor {
    fn drop(&mut self) {
        self.stop_and_join_thread().unwrap();
    }
}

#[cfg(test)]
mod tests {
    use bitcoin::blockdata::block::BlockHeader;
    use bitcoin::blockdata::constants::genesis_block;
    use bitcoin::blockdata::transaction::{Transaction, TxOut};
    use bitcoin::network::constants::Network;
    use lightning::chain::{BestBlock, Confirm, chainmonitor};
    use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
    use lightning::chain::keysinterface::{InMemorySigner, Recipient, KeysInterface, KeysManager};
    use lightning::chain::transaction::OutPoint;
    use lightning::get_event_msg;
    use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
    use lightning::ln::features::InitFeatures;
    use lightning::ln::msgs::{ChannelMessageHandler, Init};
    use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
    use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
    use lightning::util::config::UserConfig;
    use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
    use lightning::util::ser::Writeable;
    use lightning::util::test_utils;
    use lightning::util::persist::KVStorePersister;
    use lightning_invoice::payment::{InvoicePayer, RetryAttempts};
    use lightning_invoice::utils::DefaultRouter;
    use lightning_persister::FilesystemPersister;
    use std::fs;
    use std::path::PathBuf;
    use std::sync::{Arc, Mutex};
    use std::time::Duration;
    use lightning::routing::scoring::{FixedPenaltyScorer};
    use super::{BackgroundProcessor, FRESHNESS_TIMER};

    const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;

    #[derive(Clone, Eq, Hash, PartialEq)]
    struct TestDescriptor{}
    impl SocketDescriptor for TestDescriptor {
        fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
            0
        }

        fn disconnect_socket(&mut self) {}
    }

    type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;

    struct Node {
        node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
        net_graph_msg_handler: Option<Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>>,
        peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
        chain_monitor: Arc<ChainMonitor>,
        persister: Arc<FilesystemPersister>,
        tx_broadcaster: Arc<test_utils::TestBroadcaster>,
        network_graph: Arc<NetworkGraph>,
        logger: Arc<test_utils::TestLogger>,
        best_block: BestBlock,
        scorer: Arc<Mutex<FixedPenaltyScorer>>,
    }

    impl Drop for Node {
        fn drop(&mut self) {
            let data_dir = self.persister.get_data_dir();
            match fs::remove_dir_all(data_dir.clone()) {
                Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
                _ => {}
            }
        }
    }

    struct Persister {
        graph_error: Option<(std::io::ErrorKind, &'static str)>,
        manager_error: Option<(std::io::ErrorKind, &'static str)>,
        scorer_error: Option<(std::io::ErrorKind, &'static str)>,
        filesystem_persister: FilesystemPersister,
    }

    impl Persister {
        fn new(data_dir: String) -> Self {
            let filesystem_persister = FilesystemPersister::new(data_dir.clone());
            Self { graph_error: None, manager_error: None, scorer_error: None, filesystem_persister }
        }

        fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
            Self { graph_error: Some((error, message)), ..self }
        }

        fn with_manager_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
            Self { manager_error: Some((error, message)), ..self }
        }

        fn with_scorer_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
            Self { scorer_error: Some((error, message)), ..self }
        }
    }

    impl KVStorePersister for Persister {
        fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
            if key == "manager" {
                if let Some((error, message)) = self.manager_error {
                    return Err(std::io::Error::new(error, message))
                }
            }

            if key == "network_graph" {
                if let Some((error, message)) = self.graph_error {
                    return Err(std::io::Error::new(error, message))
                }
            }

            if key == "scorer" {
                if let Some((error, message)) = self.scorer_error {
                    return Err(std::io::Error::new(error, message))
                }
            }

            self.filesystem_persister.persist(key, object)
        }
    }

    fn get_full_filepath(filepath: String, filename: String) -> String {
        let mut path = PathBuf::from(filepath);
        path.push(filename);
        path.to_str().unwrap().to_string()
    }

    fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
        let mut nodes = Vec::new();
        for i in 0..num_nodes {
            let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
            let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
            let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
            let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
            let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
            let seed = [i as u8; 32];
            let network = Network::Testnet;
            let genesis_block = genesis_block(network);
            let now = Duration::from_secs(genesis_block.header.time as u64);
            let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
            let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
            let best_block = BestBlock::from_genesis(network);
            let params = ChainParameters { network, best_block };
            let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params));
            let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash()));
            let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())));
            let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
            let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(Recipient::Node).unwrap(), &seed, logger.clone(), IgnoringMessageHandler{}));
            let scorer = Arc::new(Mutex::new(test_utils::TestScorer::with_penalty(0)));
            let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
            nodes.push(node);
        }

        for i in 0..num_nodes {
            for j in (i+1)..num_nodes {
                nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
                nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
            }
        }

        nodes
    }

    macro_rules! open_channel {
        ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
            begin_open_channel!($node_a, $node_b, $channel_value);
            let events = $node_a.node.get_and_clear_pending_events();
            assert_eq!(events.len(), 1);
            let (temporary_channel_id, tx) = handle_funding_generation_ready!(&events[0], $channel_value);
            end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
            tx
        }}
    }

    macro_rules! begin_open_channel {
        ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
            $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
            $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
            $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
        }}
    }

    macro_rules! handle_funding_generation_ready {
        ($event: expr, $channel_value: expr) => {{
            match $event {
                &Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id } => {
                    assert_eq!(channel_value_satoshis, $channel_value);
                    assert_eq!(user_channel_id, 42);

                    let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
                        value: channel_value_satoshis, script_pubkey: output_script.clone(),
                    }]};
                    (temporary_channel_id, tx)
                },
                _ => panic!("Unexpected event"),
            }
        }}
    }

    macro_rules! end_open_channel {
        ($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
            $node_a.node.funding_transaction_generated(&$temporary_channel_id, $tx.clone()).unwrap();
            $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
            $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
        }}
    }

    fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
        for i in 1..=depth {
            let prev_blockhash = node.best_block.block_hash();
            let height = node.best_block.height() + 1;
            let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
            let txdata = vec![(0, tx)];
            node.best_block = BestBlock::new(header.block_hash(), height);
            match i {
                1 => {
                    node.node.transactions_confirmed(&header, &txdata, height);
                    node.chain_monitor.transactions_confirmed(&header, &txdata, height);
                },
                x if x == depth => {
                    node.node.best_block_updated(&header, height);
                    node.chain_monitor.best_block_updated(&header, height);
                },
                _ => {},
            }
        }
    }

    fn confirm_transaction(node: &mut Node, tx: &Transaction) {
        confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
    }

    #[test]
    fn test_background_processor() {
        // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
        // updates. Also test that when new updates are available, the manager signals that it needs
        // re-persistence and is successfully re-persisted.
        let nodes = create_nodes(2, "test_background_processor".to_string());

        // Go through the channel creation process so that each node has something to persist. Since
        // open_channel consumes events, it must complete before starting BackgroundProcessor to
        // avoid a race with processing events.
        let tx = open_channel!(nodes[0], nodes[1], 100000);

        // Initiate the background processors to watch each node.
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir));
        let event_handler = |_: &_| {};
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

        macro_rules! check_persisted_data {
            ($node: expr, $filepath: expr) => {
                let mut expected_bytes = Vec::new();
                loop {
                    expected_bytes.clear();
                    match $node.write(&mut expected_bytes) {
                        Ok(()) => {
                            match std::fs::read($filepath) {
                                Ok(bytes) => {
                                    if bytes == expected_bytes {
                                        break
                                    } else {
                                        continue
                                    }
                                },
                                Err(_) => continue
                            }
                        },
                        Err(e) => panic!("Unexpected error: {}", e)
                    }
                }
            }
        }

        // Check that the initial channel manager data is persisted as expected.
        let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
        check_persisted_data!(nodes[0].node, filepath.clone());

        loop {
            if !nodes[0].node.get_persistence_condvar_value() { break }
        }

        // Force-close the channel.
        nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();

        // Check that the force-close updates are persisted.
        check_persisted_data!(nodes[0].node, filepath.clone());
        loop {
            if !nodes[0].node.get_persistence_condvar_value() { break }
        }

        // Check network graph is persisted
        let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "network_graph".to_string());
        if let Some(ref handler) = nodes[0].net_graph_msg_handler {
            let network_graph = handler.network_graph();
            check_persisted_data!(network_graph, filepath.clone());
        }

        // Check scorer is persisted
        let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "scorer".to_string());
        check_persisted_data!(nodes[0].scorer, filepath.clone());

        assert!(bg_processor.stop().is_ok());
    }

    #[test]
    fn test_timer_tick_called() {
        // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
        // `FRESHNESS_TIMER`.
        let nodes = create_nodes(1, "test_timer_tick_called".to_string());
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir));
        let event_handler = |_: &_| {};
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
        loop {
            let log_entries = nodes[0].logger.lines.lock().unwrap();
            let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
            let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
            if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
                    log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
                break
            }
        }

        assert!(bg_processor.stop().is_ok());
    }

    #[test]
    fn test_channel_manager_persist_error() {
        // Test that if we encounter an error during manager persistence, the thread panics.
        let nodes = create_nodes(2, "test_persist_error".to_string());
        open_channel!(nodes[0], nodes[1], 100000);

        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
        match bg_processor.join() {
            Ok(_) => panic!("Expected error persisting manager"),
            Err(e) => {
                assert_eq!(e.kind(), std::io::ErrorKind::Other);
                assert_eq!(e.get_ref().unwrap().to_string(), "test");
            },
        }
    }

    #[test]
    fn test_network_graph_persist_error() {
        // Test that if we encounter an error during network graph persistence, an error gets returned.
        let nodes = create_nodes(2, "test_persist_network_graph_error".to_string());
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

        match bg_processor.stop() {
            Ok(_) => panic!("Expected error persisting network graph"),
            Err(e) => {
                assert_eq!(e.kind(), std::io::ErrorKind::Other);
                assert_eq!(e.get_ref().unwrap().to_string(), "test");
            },
        }
    }

    #[test]
    fn test_scorer_persist_error() {
        // Test that if we encounter an error during scorer persistence, an error gets returned.
        let nodes = create_nodes(2, "test_persist_scorer_error".to_string());
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
        let event_handler = |_: &_| {};
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

        match bg_processor.stop() {
            Ok(_) => panic!("Expected error persisting scorer"),
            Err(e) => {
                assert_eq!(e.kind(), std::io::ErrorKind::Other);
                assert_eq!(e.get_ref().unwrap().to_string(), "test");
            },
        }
    }

    #[test]
    fn test_background_event_handling() {
        let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
        let channel_value = 100000;
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir.clone()));

        // Set up a background event handler for FundingGenerationReady events.
        let (sender, receiver) = std::sync::mpsc::sync_channel(1);
        let event_handler = move |event: &Event| {
            sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
        };
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

        // Open a channel and check that the FundingGenerationReady event was handled.
        begin_open_channel!(nodes[0], nodes[1], channel_value);
        let (temporary_channel_id, funding_tx) = receiver
            .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
            .expect("FundingGenerationReady not handled within deadline");
        end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);

        // Confirm the funding transaction.
        confirm_transaction(&mut nodes[0], &funding_tx);
        let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
        confirm_transaction(&mut nodes[1], &funding_tx);
        let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
        let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
        let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());

        assert!(bg_processor.stop().is_ok());

        // Set up a background event handler for SpendableOutputs events.
        let (sender, receiver) = std::sync::mpsc::sync_channel(1);
        let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
        let persister = Arc::new(Persister::new(data_dir));
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

        // Force close the channel and check that the SpendableOutputs event was handled.
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
        let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
        confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
        let event = receiver
            .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
            .expect("SpendableOutputs not handled within deadline");
        match event {
            Event::SpendableOutputs { .. } => {},
            Event::ChannelClosed { .. } => {},
            _ => panic!("Unexpected event: {:?}", event),
        }

        assert!(bg_processor.stop().is_ok());
    }

    #[test]
    fn test_invoice_payer() {
        let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        let nodes = create_nodes(2, "test_invoice_payer".to_string());

        // Initiate the background processors to watch each node.
        let data_dir = nodes[0].persister.get_data_dir();
        let persister = Arc::new(Persister::new(data_dir));
        let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger), random_seed_bytes);
        let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, Arc::clone(&nodes[0].scorer), Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2)));
        let event_handler = Arc::clone(&invoice_payer);
        let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
        assert!(bg_processor.stop().is_ok());
    }
}