/// for unilateral chain closure fees are at risk.
pub struct BackgroundProcessor {
stop_thread: Arc<AtomicBool>,
- /// May be used to retrieve and handle the error if `BackgroundProcessor`'s thread
- /// exits due to an error while persisting.
- pub thread_handle: JoinHandle<Result<(), std::io::Error>>,
+ /// Handle of the background thread; `Some` while the thread has not yet been joined.
+ /// `join`, `stop` and `Drop` `take()` it (leaving `None`), so a second join attempt
+ /// is a no-op.
+ thread_handle: Option<JoinHandle<Result<(), std::io::Error>>>,
}
#[cfg(not(test))]
}
impl BackgroundProcessor {
- /// Start a background thread that takes care of responsibilities enumerated in the top-level
- /// documentation.
+ /// Start a background thread that takes care of responsibilities enumerated in the [top-level
+ /// documentation].
///
- /// If `persist_manager` returns an error, then this thread will return said error (and
- /// `start()` will need to be called again to restart the `BackgroundProcessor`). Users should
- /// wait on [`thread_handle`]'s `join()` method to be able to tell if and when an error is
- /// returned, or implement `persist_manager` such that an error is never returned to the
- /// `BackgroundProcessor`
+ /// The thread runs indefinitely unless the object is dropped, [`stop`] is called, or
+ /// `persist_manager` returns an error. In case of an error, the error is retrieved by calling
+ /// either [`join`] or [`stop`].
+ ///
+ /// Typically, users should either implement [`ChannelManagerPersister`] to never return an
+ /// error or call [`join`] and handle any error that may arise. For the latter case, the
+ /// `BackgroundProcessor` must be restarted by calling `start` again after handling the error.
///
/// `persist_manager` is responsible for writing out the [`ChannelManager`] to disk, and/or
/// uploading to one or more backup services. See [`ChannelManager::write`] for writing out a
/// [`ChannelManager`]. See [`FilesystemPersister::persist_manager`] for Rust-Lightning's
/// provided implementation.
///
- /// [`thread_handle`]: BackgroundProcessor::thread_handle
+ /// [top-level documentation]: Self
+ /// [`join`]: Self::join
+ /// [`stop`]: Self::stop
/// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
/// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
/// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
}
}
});
- Self { stop_thread: stop_thread_clone, thread_handle: handle }
+ Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
+ }
+
+ /// Join `BackgroundProcessor`'s thread, returning any error that occurred while persisting
+ /// [`ChannelManager`].
+ ///
+ /// Unlike [`stop`], this does not set the stop flag before joining, so it waits for the
+ /// thread to exit of its own accord (e.g. after `persist_manager` returns an error).
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the background thread has panicked, such as while persisting or
+ /// handling events.
+ ///
+ /// [`stop`]: Self::stop
+ /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+ pub fn join(mut self) -> Result<(), std::io::Error> {
+ assert!(self.thread_handle.is_some());
+ self.join_thread()
+ }
+
+ /// Stop `BackgroundProcessor`'s thread, returning any error that occurred while persisting
+ /// [`ChannelManager`].
+ ///
+ /// Signals the thread to exit via the stop flag before joining it, rather than waiting for
+ /// the thread to exit on its own.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the background thread has panicked, such as while persisting or
+ /// handling events.
+ ///
+ /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
+ pub fn stop(mut self) -> Result<(), std::io::Error> {
+ assert!(self.thread_handle.is_some());
+ self.stop_and_join_thread()
}
- /// Stop `BackgroundProcessor`'s thread.
- pub fn stop(self) -> Result<(), std::io::Error> {
+ /// Sets the stop flag and then joins the thread, propagating any persistence error.
+ /// Shared by [`stop`](Self::stop) and the `Drop` impl.
+ fn stop_and_join_thread(&mut self) -> Result<(), std::io::Error> {
self.stop_thread.store(true, Ordering::Release);
- self.thread_handle.join().unwrap()
+ self.join_thread()
+ }
+
+ /// Joins the background thread if it has not been joined yet, returning its result.
+ fn join_thread(&mut self) -> Result<(), std::io::Error> {
+ // `take()` leaves `None`, so a later join attempt (e.g. from `Drop` after an explicit
+ // `join`/`stop`) is a harmless no-op returning `Ok(())`.
+ match self.thread_handle.take() {
+ Some(handle) => handle.join().unwrap(),
+ None => Ok(()),
+ }
+ }
+}
+
+impl Drop for BackgroundProcessor {
+ fn drop(&mut self) {
+ // NOTE(review): `unwrap` here panics if the thread exited with a persistence error that
+ // was never retrieved via `join`/`stop`; a panic while already unwinding from another
+ // panic aborts the process — confirm this is the intended drop behavior.
+ self.stop_and_join_thread().unwrap();
+ }
}
let mut nodes = Vec::new();
for i in 0..num_nodes {
let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))});
- let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+ let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) });
let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", persist_dir, i)));
let persister = |_: &_| Err(std::io::Error::new(std::io::ErrorKind::Other, "test"));
let event_handler = |_| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
- let _ = bg_processor.thread_handle.join().unwrap().expect_err("Errored persisting manager: test");
+ match bg_processor.join() {
+ Ok(_) => panic!("Expected error persisting manager"),
+ Err(e) => {
+ assert_eq!(e.kind(), std::io::ErrorKind::Other);
+ assert_eq!(e.get_ref().unwrap().to_string(), "test");
+ },
+ }
}
#[test]
// Confirm the funding transaction.
confirm_transaction(&mut nodes[0], &funding_tx);
+ let as_funding = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
confirm_transaction(&mut nodes[1], &funding_tx);
- nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
- nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
+ let bs_funding = get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &bs_funding);
+ let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &as_funding);
+ let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
assert!(bg_processor.stop().is_ok());