1 //! Utilities that take care of tasks that (1) need to happen periodically to keep Rust-Lightning
2 //! running properly, and (2) either can or should be run in the background. See docs for
3 //! [`BackgroundProcessor`] for more details on the nitty-gritty.
5 #![deny(broken_intra_doc_links)]
9 #[macro_use] extern crate lightning;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
13 use lightning::chain::chainmonitor::{ChainMonitor, Persist};
14 use lightning::chain::keysinterface::{Sign, KeysInterface};
15 use lightning::ln::channelmanager::ChannelManager;
16 use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
17 use lightning::ln::peer_handler::{CustomMessageHandler, PeerManager, SocketDescriptor};
18 use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
19 use lightning::util::events::{Event, EventHandler, EventsProvider};
20 use lightning::util::logger::Logger;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::ops::Deref;
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
28 /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep
29 /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its
30 /// responsibilities are:
31 /// * Processing [`Event`]s with a user-provided [`EventHandler`].
32 /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so,
33 /// writing it to disk/backups by invoking the callback given to it at startup.
34 /// [`ChannelManager`] persistence should be done in the background.
35 /// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`]
36 /// at the appropriate intervals.
37 /// * Calling [`NetworkGraph::remove_stale_channels`] (if a [`NetGraphMsgHandler`] is provided to
38 /// [`BackgroundProcessor::start`]).
/// It will also call [`PeerManager::process_events`] periodically, though this should not be
/// relied upon, as doing so may result in high latency.
45 /// If [`ChannelManager`] persistence fails and the persisted manager becomes out-of-date, then
46 /// there is a risk of channels force-closing on startup when the manager realizes it's outdated.
47 /// However, as long as [`ChannelMonitor`] backups are sound, no funds besides those used for
48 /// unilateral chain closure fees are at risk.
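///
/// # Example
///
/// A minimal usage sketch. It assumes the `persister`, `event_handler`, `chain_monitor`,
/// `channel_manager`, `net_graph_msg_handler`, `peer_manager`, and `logger` values have already
/// been constructed by the application; only the argument order is taken from
/// [`BackgroundProcessor::start`].
///
/// ```ignore
/// let bg_processor = BackgroundProcessor::start(
///     persister,
///     event_handler,
///     chain_monitor.clone(),
///     channel_manager.clone(),
///     Some(net_graph_msg_handler.clone()), // pass `None` to skip network graph pruning
///     peer_manager.clone(),
///     logger.clone(),
/// );
///
/// // ... run the node ...
///
/// // On shutdown, stop the background thread and surface any persistence error.
/// bg_processor.stop().expect("failed to persist ChannelManager on shutdown");
/// ```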
50 /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor
51 /// [`Event`]: lightning::util::events::Event
52 #[must_use = "BackgroundProcessor will immediately stop on drop. It should be stored until shutdown."]
53 pub struct BackgroundProcessor {
54 stop_thread: Arc<AtomicBool>,
55 thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
#[cfg(not(test))]
const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
const FRESHNESS_TIMER: u64 = 1;
63 #[cfg(all(not(test), not(debug_assertions)))]
64 const PING_TIMER: u64 = 5;
/// Signature operations take a lot longer without compiler optimisations. Increasing the ping
/// timer allows for this, but slower devices will still be disconnected if the timeout is
/// reached.
68 #[cfg(all(not(test), debug_assertions))]
69 const PING_TIMER: u64 = 30;
#[cfg(test)]
const PING_TIMER: u64 = 1;
73 /// Prune the network graph of stale entries hourly.
74 const NETWORK_PRUNE_TIMER: u64 = 60 * 60;
76 /// Trait which handles persisting a [`ChannelManager`] to disk.
78 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
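///
/// A closure with a matching signature satisfies this trait through the blanket implementation
/// below. As a sketch (the `data_dir` binding is an assumption and the concrete type parameters
/// are elided with `_` for brevity; the tests in this crate build a similar closure):
///
/// ```ignore
/// let persister = move |manager: &ChannelManager<_, _, _, _, _, _>| {
///     // Write the serialized manager wherever the application keeps its data.
///     lightning_persister::FilesystemPersister::persist_manager(data_dir.clone(), manager)
/// };
/// ```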
79 pub trait ChannelManagerPersister<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
81 M::Target: 'static + chain::Watch<Signer>,
82 T::Target: 'static + BroadcasterInterface,
83 K::Target: 'static + KeysInterface<Signer = Signer>,
84 F::Target: 'static + FeeEstimator,
85 L::Target: 'static + Logger,
87 /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed
/// (which will cause the [`BackgroundProcessor`] that called this method to exit).
90 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
91 fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>;
94 impl<Fun, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
95 ChannelManagerPersister<Signer, M, T, K, F, L> for Fun where
96 M::Target: 'static + chain::Watch<Signer>,
97 T::Target: 'static + BroadcasterInterface,
98 K::Target: 'static + KeysInterface<Signer = Signer>,
99 F::Target: 'static + FeeEstimator,
100 L::Target: 'static + Logger,
101 Fun: Fn(&ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error>,
103 fn persist_manager(&self, channel_manager: &ChannelManager<Signer, M, T, K, F, L>) -> Result<(), std::io::Error> {
104 self(channel_manager)
108 /// Decorates an [`EventHandler`] with common functionality provided by standard [`EventHandler`]s.
109 struct DecoratingEventHandler<
111 N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
112 G: Deref<Target = NetworkGraph>,
116 where A::Target: chain::Access, L::Target: Logger {
118 net_graph_msg_handler: Option<N>,
123 N: Deref<Target = NetGraphMsgHandler<G, A, L>>,
124 G: Deref<Target = NetworkGraph>,
127 > EventHandler for DecoratingEventHandler<E, N, G, A, L>
128 where A::Target: chain::Access, L::Target: Logger {
129 fn handle_event(&self, event: &Event) {
130 if let Some(event_handler) = &self.net_graph_msg_handler {
131 event_handler.handle_event(event);
133 self.event_handler.handle_event(event);
137 impl BackgroundProcessor {
/// Start a background thread that takes care of responsibilities enumerated in the [top-level
/// documentation].
141 /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
/// `persist_manager` returns an error. In case of an error, the error can be retrieved by calling
/// either [`join`] or [`stop`].
145 /// # Data Persistence
147 /// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
148 /// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
149 /// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
150 /// provided implementation.
152 /// Typically, users should either implement [`ChannelManagerPersister`] to never return an
153 /// error or call [`join`] and handle any error that may arise. For the latter case,
154 /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
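///
/// A sketch of the latter approach, assuming a `start_node()` helper provided by the
/// application that rebuilds the arguments and calls [`BackgroundProcessor::start`]:
///
/// ```ignore
/// loop {
///     let bg_processor = start_node();
///     if let Err(e) = bg_processor.join() {
///         // Persistence failed; handle the error, then loop around to restart the processor.
///         eprintln!("background processing stopped: {}", e);
///         continue;
///     }
///     break;
/// }
/// ```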
///
/// # Event Handling
///
/// `event_handler` is responsible for handling events that users should be notified of (e.g.,
/// payment failed); a minimal handler is sketched after the list below. [`BackgroundProcessor`]
/// may decorate the given [`EventHandler`] with common functionality implemented by other
/// handlers:
/// * [`NetGraphMsgHandler`], if given, will update the [`NetworkGraph`] based on payment failures.
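///
/// A trivial handler, shown as a sketch (a real application would typically match on the
/// [`Event`] variants it cares about rather than just logging them):
///
/// ```ignore
/// let event_handler = |event: &Event| {
///     // React to the event here; ignoring a variant is also a valid choice.
///     println!("got event: {:?}", event);
/// };
/// ```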
163 /// [top-level documentation]: BackgroundProcessor
164 /// [`join`]: Self::join
165 /// [`stop`]: Self::stop
166 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
167 /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
168 /// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
169 /// [`NetworkGraph`]: lightning::routing::network_graph::NetworkGraph
171 Signer: 'static + Sign,
172 CA: 'static + Deref + Send + Sync,
173 CF: 'static + Deref + Send + Sync,
174 CW: 'static + Deref + Send + Sync,
175 T: 'static + Deref + Send + Sync,
176 K: 'static + Deref + Send + Sync,
177 F: 'static + Deref + Send + Sync,
178 G: 'static + Deref<Target = NetworkGraph> + Send + Sync,
179 L: 'static + Deref + Send + Sync,
180 P: 'static + Deref + Send + Sync,
181 Descriptor: 'static + SocketDescriptor + Send + Sync,
182 CMH: 'static + Deref + Send + Sync,
183 RMH: 'static + Deref + Send + Sync,
184 EH: 'static + EventHandler + Send,
185 CMP: 'static + Send + ChannelManagerPersister<Signer, CW, T, K, F, L>,
186 M: 'static + Deref<Target = ChainMonitor<Signer, CF, T, F, L, P>> + Send + Sync,
187 CM: 'static + Deref<Target = ChannelManager<Signer, CW, T, K, F, L>> + Send + Sync,
188 NG: 'static + Deref<Target = NetGraphMsgHandler<G, CA, L>> + Send + Sync,
189 UMH: 'static + Deref + Send + Sync,
190 PM: 'static + Deref<Target = PeerManager<Descriptor, CMH, RMH, L, UMH>> + Send + Sync,
192 persister: CMP, event_handler: EH, chain_monitor: M, channel_manager: CM,
193 net_graph_msg_handler: Option<NG>, peer_manager: PM, logger: L
196 CA::Target: 'static + chain::Access,
197 CF::Target: 'static + chain::Filter,
198 CW::Target: 'static + chain::Watch<Signer>,
199 T::Target: 'static + BroadcasterInterface,
200 K::Target: 'static + KeysInterface<Signer = Signer>,
201 F::Target: 'static + FeeEstimator,
202 L::Target: 'static + Logger,
203 P::Target: 'static + Persist<Signer>,
204 CMH::Target: 'static + ChannelMessageHandler,
205 RMH::Target: 'static + RoutingMessageHandler,
206 UMH::Target: 'static + CustomMessageHandler,
208 let stop_thread = Arc::new(AtomicBool::new(false));
209 let stop_thread_clone = stop_thread.clone();
210 let handle = thread::spawn(move || -> Result<(), std::io::Error> {
211 let event_handler = DecoratingEventHandler { event_handler, net_graph_msg_handler: net_graph_msg_handler.as_ref().map(|t| t.deref()) };
213 log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup");
214 channel_manager.timer_tick_occurred();
216 let mut last_freshness_call = Instant::now();
217 let mut last_ping_call = Instant::now();
218 let mut last_prune_call = Instant::now();
219 let mut have_pruned = false;
222 peer_manager.process_events();
223 channel_manager.process_pending_events(&event_handler);
224 chain_monitor.process_pending_events(&event_handler);
225 let updates_available =
226 channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
227 if updates_available {
228 log_trace!(logger, "Persisting ChannelManager...");
229 persister.persist_manager(&*channel_manager)?;
230 log_trace!(logger, "Done persisting ChannelManager.");
232 // Exit the loop if the background processor was requested to stop.
if stop_thread.load(Ordering::Acquire) {
234 log_trace!(logger, "Terminating background processor.");
237 if last_freshness_call.elapsed().as_secs() > FRESHNESS_TIMER {
238 log_trace!(logger, "Calling ChannelManager's timer_tick_occurred");
239 channel_manager.timer_tick_occurred();
240 last_freshness_call = Instant::now();
242 if last_ping_call.elapsed().as_secs() > PING_TIMER * 2 {
243 // On various platforms, we may be starved of CPU cycles for several reasons.
244 // E.g. on iOS, if we've been in the background, we will be entirely paused.
245 // Similarly, if we're on a desktop platform and the device has been asleep, we
246 // may not get any cycles.
247 // In any case, if we've been entirely paused for more than double our ping
248 // timer, we should have disconnected all sockets by now (and they're probably
249 // dead anyway), so disconnect them by calling `timer_tick_occurred()` twice.
250 log_trace!(logger, "Awoke after more than double our ping timer, disconnecting peers.");
251 peer_manager.disconnect_all_peers();
252 last_ping_call = Instant::now();
253 } else if last_ping_call.elapsed().as_secs() > PING_TIMER {
254 log_trace!(logger, "Calling PeerManager's timer_tick_occurred");
255 peer_manager.timer_tick_occurred();
256 last_ping_call = Instant::now();
// Note that we want to run a graph prune once not long after startup before falling back
// to our usual hourly prunes. This ensures short-lived clients still prune their network
// graph: the first prune runs 60 seconds after startup, after which we continue at our
// normal cadence.
263 if last_prune_call.elapsed().as_secs() > if have_pruned { NETWORK_PRUNE_TIMER } else { 60 } {
264 if let Some(ref handler) = net_graph_msg_handler {
265 log_trace!(logger, "Pruning network graph of stale entries");
266 handler.network_graph().remove_stale_channels();
267 last_prune_call = Instant::now();
272 // After we exit, ensure we persist the ChannelManager one final time - this avoids
273 // some races where users quit while channel updates were in-flight, with
274 // ChannelMonitor update(s) persisted without a corresponding ChannelManager update.
275 persister.persist_manager(&*channel_manager)
277 Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
280 /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
281 /// [`ChannelManager`].
///
/// # Panics
///
/// This function panics if the background thread has panicked, such as while persisting or
/// handling events.
288 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
289 pub fn join(mut self) -> Result<(), std::io::Error> {
290 assert!(self.thread_handle.is_some());
294 /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
295 /// [`ChannelManager`].
///
/// # Panics
///
/// This function panics if the background thread has panicked, such as while persisting or
/// handling events.
302 /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
303 pub fn stop(mut self) -> Result<(), std::io::Error> {
304 assert!(self.thread_handle.is_some());
305 self.stop_and_join_thread()
308 fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
309 self.stop_thread.store(true, Ordering::Release);
313 fn join_thread(&mut self) -> Result<(), std::io::Error> {
314 match self.thread_handle.take() {
315 Some(handle) => handle.join().unwrap(),
321 impl Drop for BackgroundProcessor {
323 self.stop_and_join_thread().unwrap();
329 use bitcoin::blockdata::block::BlockHeader;
330 use bitcoin::blockdata::constants::genesis_block;
331 use bitcoin::blockdata::transaction::{Transaction, TxOut};
332 use bitcoin::network::constants::Network;
333 use lightning::chain::{BestBlock, Confirm, chainmonitor};
334 use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
335 use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
336 use lightning::chain::transaction::OutPoint;
337 use lightning::get_event_msg;
338 use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, ChannelManager, SimpleArcChannelManager};
339 use lightning::ln::features::InitFeatures;
340 use lightning::ln::msgs::{ChannelMessageHandler, Init};
341 use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler};
342 use lightning::routing::network_graph::{NetworkGraph, NetGraphMsgHandler};
343 use lightning::util::config::UserConfig;
344 use lightning::util::events::{Event, MessageSendEventsProvider, MessageSendEvent};
345 use lightning::util::ser::Writeable;
346 use lightning::util::test_utils;
347 use lightning_invoice::payment::{InvoicePayer, RetryAttempts};
348 use lightning_invoice::utils::DefaultRouter;
349 use lightning_persister::FilesystemPersister;
use std::fs;
use std::path::PathBuf;
352 use std::sync::{Arc, Mutex};
353 use std::time::Duration;
354 use super::{BackgroundProcessor, FRESHNESS_TIMER};
356 const EVENT_DEADLINE: u64 = 5 * FRESHNESS_TIMER;
358 #[derive(Clone, Eq, Hash, PartialEq)]
359 struct TestDescriptor{}
360 impl SocketDescriptor for TestDescriptor {
361 fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
365 fn disconnect_socket(&mut self) {}
368 type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
371 node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
372 net_graph_msg_handler: Option<Arc<NetGraphMsgHandler<Arc<NetworkGraph>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>>,
373 peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>, IgnoringMessageHandler>>,
374 chain_monitor: Arc<ChainMonitor>,
375 persister: Arc<FilesystemPersister>,
376 tx_broadcaster: Arc<test_utils::TestBroadcaster>,
377 network_graph: Arc<NetworkGraph>,
378 logger: Arc<test_utils::TestLogger>,
379 best_block: BestBlock,
384 let data_dir = self.persister.get_data_dir();
385 match fs::remove_dir_all(data_dir.clone()) {
386 Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
392 fn get_full_filepath(filepath: String, filename: String) -> String {
let mut path = PathBuf::from(filepath);
path.push(filename);
path.to_str().unwrap().to_string()
398 fn create_nodes(num_nodes: usize, persist_dir: String) -> Vec<Node> {
399 let mut nodes = Vec::new();
400 for i in 0..num_nodes {
401 let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
402 let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
403 let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
404 let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
405 let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
406 let seed = [i as u8; 32];
407 let network = Network::Testnet;
408 let genesis_block = genesis_block(network);
409 let now = Duration::from_secs(genesis_block.header.time as u64);
410 let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
411 let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
412 let best_block = BestBlock::from_genesis(network);
413 let params = ChainParameters { network, best_block };
414 let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), keys_manager.clone(), UserConfig::default(), params));
415 let network_graph = Arc::new(NetworkGraph::new(genesis_block.header.block_hash()));
416 let net_graph_msg_handler = Some(Arc::new(NetGraphMsgHandler::new(network_graph.clone(), Some(chain_source.clone()), logger.clone())));
417 let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
418 let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone(), IgnoringMessageHandler{}));
419 let node = Node { node: manager, net_graph_msg_handler, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block };
423 for i in 0..num_nodes {
424 for j in (i+1)..num_nodes {
425 nodes[i].node.peer_connected(&nodes[j].node.get_our_node_id(), &Init { features: InitFeatures::known() });
426 nodes[j].node.peer_connected(&nodes[i].node.get_our_node_id(), &Init { features: InitFeatures::known() });
433 macro_rules! open_channel {
434 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
435 begin_open_channel!($node_a, $node_b, $channel_value);
436 let events = $node_a.node.get_and_clear_pending_events();
437 assert_eq!(events.len(), 1);
438 let (temporary_channel_id, tx) = handle_funding_generation_ready!(&events[0], $channel_value);
439 end_open_channel!($node_a, $node_b, temporary_channel_id, tx);
444 macro_rules! begin_open_channel {
445 ($node_a: expr, $node_b: expr, $channel_value: expr) => {{
446 $node_a.node.create_channel($node_b.node.get_our_node_id(), $channel_value, 100, 42, None).unwrap();
447 $node_b.node.handle_open_channel(&$node_a.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_a, MessageSendEvent::SendOpenChannel, $node_b.node.get_our_node_id()));
448 $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
452 macro_rules! handle_funding_generation_ready {
453 ($event: expr, $channel_value: expr) => {{
455 &Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, ref output_script, user_channel_id } => {
456 assert_eq!(channel_value_satoshis, $channel_value);
457 assert_eq!(user_channel_id, 42);
459 let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
460 value: channel_value_satoshis, script_pubkey: output_script.clone(),
462 (temporary_channel_id, tx)
464 _ => panic!("Unexpected event"),
469 macro_rules! end_open_channel {
470 ($node_a: expr, $node_b: expr, $temporary_channel_id: expr, $tx: expr) => {{
471 $node_a.node.funding_transaction_generated(&$temporary_channel_id, $tx.clone()).unwrap();
472 $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
473 $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
477 fn confirm_transaction_depth(node: &mut Node, tx: &Transaction, depth: u32) {
479 let prev_blockhash = node.best_block.block_hash();
480 let height = node.best_block.height() + 1;
481 let header = BlockHeader { version: 0x20000000, prev_blockhash, merkle_root: Default::default(), time: height, bits: 42, nonce: 42 };
482 let txdata = vec![(0, tx)];
483 node.best_block = BestBlock::new(header.block_hash(), height);
486 node.node.transactions_confirmed(&header, &txdata, height);
487 node.chain_monitor.transactions_confirmed(&header, &txdata, height);
490 node.node.best_block_updated(&header, height);
491 node.chain_monitor.best_block_updated(&header, height);
497 fn confirm_transaction(node: &mut Node, tx: &Transaction) {
498 confirm_transaction_depth(node, tx, ANTI_REORG_DELAY);
502 fn test_background_processor() {
503 // Test that when a new channel is created, the ChannelManager needs to be re-persisted with
504 // updates. Also test that when new updates are available, the manager signals that it needs
505 // re-persistence and is successfully re-persisted.
506 let nodes = create_nodes(2, "test_background_processor".to_string());
508 // Go through the channel creation process so that each node has something to persist. Since
509 // open_channel consumes events, it must complete before starting BackgroundProcessor to
510 // avoid a race with processing events.
511 let tx = open_channel!(nodes[0], nodes[1], 100000);
513 // Initiate the background processors to watch each node.
514 let data_dir = nodes[0].persister.get_data_dir();
515 let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
516 let event_handler = |_: &_| {};
517 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
519 macro_rules! check_persisted_data {
520 ($node: expr, $filepath: expr, $expected_bytes: expr) => {
522 $expected_bytes.clear();
523 match $node.write(&mut $expected_bytes) {
525 match std::fs::read($filepath) {
527 if bytes == $expected_bytes {
536 Err(e) => panic!("Unexpected error: {}", e)
542 // Check that the initial channel manager data is persisted as expected.
543 let filepath = get_full_filepath("test_background_processor_persister_0".to_string(), "manager".to_string());
544 let mut expected_bytes = Vec::new();
545 check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
547 if !nodes[0].node.get_persistence_condvar_value() { break }
550 // Force-close the channel.
551 nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
553 // Check that the force-close updates are persisted.
554 let mut expected_bytes = Vec::new();
555 check_persisted_data!(nodes[0].node, filepath.clone(), expected_bytes);
557 if !nodes[0].node.get_persistence_condvar_value() { break }
560 assert!(bg_processor.stop().is_ok());
564 fn test_timer_tick_called() {
// Test that `ChannelManager`'s and `PeerManager`'s `timer_tick_occurred` are called on
// schedule (every `FRESHNESS_TIMER` and `PING_TIMER` seconds, respectively).
567 let nodes = create_nodes(1, "test_timer_tick_called".to_string());
568 let data_dir = nodes[0].persister.get_data_dir();
569 let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
570 let event_handler = |_: &_| {};
571 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
573 let log_entries = nodes[0].logger.lines.lock().unwrap();
574 let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
575 let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string();
576 if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() &&
577 log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() {
582 assert!(bg_processor.stop().is_ok());
586 fn test_persist_error() {
// Test that if we encounter an error during manager persistence, the error is returned
// when joining the background processor's thread.
588 let nodes = create_nodes(2, "test_persist_error".to_string());
589 open_channel!(nodes[0], nodes[1], 100000);
591 let persister = |_: &_| Err(std::io::Error::new(std::io::ErrorKind::Other, "test"));
592 let event_handler = |_: &_| {};
593 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
594 match bg_processor.join() {
595 Ok(_) => panic!("Expected error persisting manager"),
597 assert_eq!(e.kind(), std::io::ErrorKind::Other);
598 assert_eq!(e.get_ref().unwrap().to_string(), "test");
604 fn test_background_event_handling() {
605 let mut nodes = create_nodes(2, "test_background_event_handling".to_string());
606 let channel_value = 100000;
607 let data_dir = nodes[0].persister.get_data_dir();
608 let persister = move |node: &_| FilesystemPersister::persist_manager(data_dir.clone(), node);
610 // Set up a background event handler for FundingGenerationReady events.
611 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
612 let event_handler = move |event: &Event| {
613 sender.send(handle_funding_generation_ready!(event, channel_value)).unwrap();
615 let bg_processor = BackgroundProcessor::start(persister.clone(), event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
617 // Open a channel and check that the FundingGenerationReady event was handled.
618 begin_open_channel!(nodes[0], nodes[1], channel_value);
619 let (temporary_channel_id, funding_tx) = receiver
620 .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
621 .expect("FundingGenerationReady not handled within deadline");
622 end_open_channel!(nodes[0], nodes[1], temporary_channel_id, funding_tx);
624 // Confirm the funding transaction.
625 confirm_transaction(&mut nodes[0], &funding_tx);
626 let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
627 confirm_transaction(&mut nodes[1], &funding_tx);
628 let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
629 nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
630 let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
631 nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
632 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
634 assert!(bg_processor.stop().is_ok());
636 // Set up a background event handler for SpendableOutputs events.
637 let (sender, receiver) = std::sync::mpsc::sync_channel(1);
638 let event_handler = move |event: &Event| sender.send(event.clone()).unwrap();
639 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
641 // Force close the channel and check that the SpendableOutputs event was handled.
642 nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
643 let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
644 confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
646 .recv_timeout(Duration::from_secs(EVENT_DEADLINE))
647 .expect("SpendableOutputs not handled within deadline");
649 Event::SpendableOutputs { .. } => {},
650 Event::ChannelClosed { .. } => {},
651 _ => panic!("Unexpected event: {:?}", event),
654 assert!(bg_processor.stop().is_ok());
658 fn test_invoice_payer() {
659 let nodes = create_nodes(2, "test_invoice_payer".to_string());
661 // Initiate the background processors to watch each node.
662 let data_dir = nodes[0].persister.get_data_dir();
663 let persister = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
664 let router = DefaultRouter::new(Arc::clone(&nodes[0].network_graph), Arc::clone(&nodes[0].logger));
665 let scorer = Arc::new(Mutex::new(test_utils::TestScorer::default()));
666 let invoice_payer = Arc::new(InvoicePayer::new(Arc::clone(&nodes[0].node), router, scorer, Arc::clone(&nodes[0].logger), |_: &_| {}, RetryAttempts(2)));
667 let event_handler = Arc::clone(&invoice_payer);
668 let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].net_graph_msg_handler.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
669 assert!(bg_processor.stop().is_ok());