From: Jeffrey Czyz
Date: Tue, 21 Jul 2020 05:12:14 +0000 (-0700)
Subject: Rename SimpleManyChannelMonitor to ChainMonitor
X-Git-Tag: v0.0.12~21^2~12
X-Git-Url: http://git.bitcoin.ninja/?a=commitdiff_plain;h=6662e959c8443fd74c76c2908cfb0c78ec4f906a;p=rust-lightning

Rename SimpleManyChannelMonitor to ChainMonitor

ManyChannelMonitor was renamed chain::Watch in the previous commit. Use
a more concise name for an implementation that monitors the chain for
channel activity. Future work will parameterize the struct to allow for
different varieties of persistence. Thus, users will usually be able to
use ChainMonitor directly rather than implementing a chain::Watch that
wraps it.
---

diff --git a/ARCH.md b/ARCH.md
index 1d987a353..b7276669a 100644
--- a/ARCH.md
+++ b/ARCH.md
@@ -39,24 +39,24 @@ At a high level, some of the common interfaces fit together as follows:

[ASCII architecture diagram; its column alignment was lost in extraction. The hunk
redraws the diagram so that the chain::Watch box used by ChannelManager is now backed
by a ChainMonitor box, marked "(is-a)" chain::Watch, with arrows to FeeEstimator and
BroadcasterInterface; the UserConfig, MessageSendEvent, PeerManager, chain::Access,
Event, and NetGraphMsgHandler boxes are unchanged.]

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 3ce6fa26f..332940b2f 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -81,9 +81,9 @@ impl Writer for VecWriter {
 	}
 }

-struct TestChannelMonitor {
+struct TestChainMonitor {
 	pub logger: Arc,
-	pub simple_monitor: Arc, Arc, Arc>>,
+	pub chain_monitor: Arc, Arc, Arc>>,
 	pub update_ret: Mutex>,
 	// If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
 	// logic will automatically force-close our channels for us (as we don't have an up-to-date
@@ -93,10 +93,10 @@ struct TestChannelMonitor {
 	pub latest_monitors: Mutex)>>,
 	pub should_update_manager: atomic::AtomicBool,
 }
-impl TestChannelMonitor {
+impl TestChainMonitor {
 	pub fn new(broadcaster: Arc, logger: Arc, feeest: Arc) -> Self {
 		Self {
-			simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger.clone(), feeest)),
+			chain_monitor:
Arc::new(channelmonitor::ChainMonitor::new(broadcaster, logger.clone(), feeest)), logger, update_ret: Mutex::new(Ok(())), latest_monitors: Mutex::new(HashMap::new()), @@ -104,7 +104,7 @@ impl TestChannelMonitor { } } } -impl chain::Watch for TestChannelMonitor { +impl chain::Watch for TestChainMonitor { type Keys = EnforcingChannelKeys; fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> { @@ -114,7 +114,7 @@ impl chain::Watch for TestChannelMonitor { panic!("Already had monitor pre-watch_channel"); } self.should_update_manager.store(true, atomic::Ordering::Relaxed); - assert!(self.simple_monitor.watch_channel(funding_txo, monitor).is_ok()); + assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok()); self.update_ret.lock().unwrap().clone() } @@ -135,7 +135,7 @@ impl chain::Watch for TestChannelMonitor { } fn release_pending_monitor_events(&self) -> Vec { - return self.simple_monitor.release_pending_monitor_events(); + return self.chain_monitor.release_pending_monitor_events(); } } @@ -191,7 +191,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! make_node { ($node_id: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); + let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) }); let mut config = UserConfig::default(); @@ -206,7 +206,7 @@ pub fn do_test(data: &[u8], out: Out) { macro_rules! reload_node { ($ser: expr, $node_id: expr, $old_monitors: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); + let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) }); let mut config = UserConfig::default(); @@ -235,7 +235,7 @@ pub fn do_test(data: &[u8], out: Out) { channel_monitors: monitor_refs, }; - (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor) + (<(BlockHash, ChannelManager, Arc, Arc, Arc, Arc>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor) } } } diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 61458e241..8d1c68865 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -145,13 +145,13 @@ impl<'a> std::hash::Hash for Peer<'a> { type ChannelMan = ChannelManager< EnforcingChannelKeys, - Arc, Arc, Arc>>, + Arc, Arc, Arc>>, Arc, Arc, Arc, Arc>; type PeerMan<'a> = PeerManager, Arc, Arc, Arc>>, Arc>; struct MoneyLossDetector<'a> { manager: Arc, - monitor: Arc, Arc, Arc>>, handler: PeerMan<'a>, @@ -166,7 +166,7 @@ struct MoneyLossDetector<'a> { impl<'a> MoneyLossDetector<'a> { pub fn new(peers: &'a RefCell<[bool; 256]>, manager: Arc, - monitor: Arc, Arc, Arc>>, + monitor: Arc, Arc, Arc>>, handler: PeerMan<'a>) -> Self { MoneyLossDetector { manager, @@ -334,7 +334,7 @@ pub fn do_test(data: &[u8], logger: &Arc) { }; let broadcast = Arc::new(TestBroadcaster{}); - let monitor = 
Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
+	let monitor = Arc::new(channelmonitor::ChainMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
 	let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
 	let mut config = UserConfig::default();

diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 0757ffd58..d30711648 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
@@ -35,12 +35,12 @@
 //! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
 //! type Logger = dyn lightning::util::logger::Logger;
 //! type ChainAccess = dyn lightning::chain::Access;
-//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor, Arc, Arc>;
-//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager;
-//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager;
+//! type ChainMonitor = lightning::ln::channelmonitor::ChainMonitor, Arc, Arc>;
+//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager;
+//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager;
 //!
 //! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
+//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
 //! 	let (sender, mut receiver) = mpsc::channel(2);
 //! 	lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
 //! 	loop {
@@ -48,14 +48,14 @@
 //! 		for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //! 			// Handle the event!
 //! 		}
-//! 		for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//! 		for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //! 			// Handle the event!
 //! 		}
 //! 	}
 //! }
 //!
 //! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc, channel_manager: ChannelManager, socket: TcpStream) {
+//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc, channel_manager: ChannelManager, socket: TcpStream) {
 //! 	let (sender, mut receiver) = mpsc::channel(2);
 //! 	lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
 //! 	loop {
@@ -63,7 +63,7 @@
 //! 		for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //! 			// Handle the event!
 //! 		}
-//! 		for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//! 		for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //! 			// Handle the event!
 //!
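 //! 		// Note: the same ChainMonitor value can also serve as the ChannelManager's
 //! 		// chain::Watch, since this patch implements chain::Watch for ChainMonitor;
 //! 		// no wrapper type is needed for the common case.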
} diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index aa549de23..e99dca78b 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -41,7 +41,7 @@ fn test_simple_monitor_permanent_update_fail() { let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure); let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {}); @@ -76,7 +76,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); { let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; @@ -95,8 +95,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); } - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -125,7 +125,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // Now set it to failed again... 
let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]); { - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {}); @@ -191,7 +191,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now try to send a second payment which will fail to send let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]); { - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap(); unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {}); @@ -245,8 +245,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } // Now fix monitor updating... - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -532,15 +532,15 @@ fn test_monitor_update_fail_cs() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); @@ -563,7 +563,7 @@ fn 
test_monitor_update_fail_cs() { assert!(updates.update_fee.is_none()); assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -573,8 +573,8 @@ fn test_monitor_update_fail_cs() { _ => panic!("Unexpected event"), } - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -622,7 +622,7 @@ fn test_monitor_update_fail_no_rebroadcast() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -630,8 +630,8 @@ fn test_monitor_update_fail_no_rebroadcast() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 1); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -684,7 +684,7 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -696,8 +696,8 @@ fn test_monitor_update_raa_while_paused() { nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented 
responses to RAA".to_string(), 1); check_added_monitors!(nodes[0], 1); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -779,7 +779,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Now fail monitor updating. - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -797,7 +797,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[0], 1); } - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); @@ -858,8 +858,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. 
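 	// (Pattern used throughout these tests: clear the injected update_ret error, read the
 	// channel's latest_monitor_update_id back from the chain_monitor, then notify the
 	// ChannelManager via channel_monitor_updated that the update completed.)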
- *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1020,7 +1020,7 @@ fn test_monitor_update_fail_reestablish() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() }); @@ -1049,8 +1049,8 @@ fn test_monitor_update_fail_reestablish() { check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1123,7 +1123,7 @@ fn raa_no_response_awaiting_raa_state() { // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, // then restore channel monitor updates. - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1135,8 +1135,8 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1); check_added_monitors!(nodes[1], 1); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); // nodes[1] should be AwaitingRAA here! 
check_added_monitors!(nodes[1], 0); @@ -1228,7 +1228,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1257,8 +1257,8 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1342,7 +1342,7 @@ fn monitor_failed_no_reestablish_response() { check_added_monitors!(nodes[0], 1); } - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -1366,8 +1366,8 @@ fn monitor_failed_no_reestablish_response() { nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1445,7 +1445,7 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); // Deliver the final RAA for the first payment, which does not require a response. 
RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response @@ -1464,8 +1464,8 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1509,7 +1509,7 @@ fn test_monitor_update_fail_claim() { let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000)); check_added_monitors!(nodes[1], 1); @@ -1523,7 +1523,7 @@ fn test_monitor_update_fail_claim() { // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1556,7 +1556,7 @@ fn test_monitor_update_fail_claim() { } else { panic!("Unexpected event!"); } // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
- let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1612,14 +1612,14 @@ fn test_monitor_update_on_pending_forwards() { nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1675,15 +1675,15 @@ fn monitor_update_claim_fail_no_response() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000)); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1728,20 +1728,20 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output); check_added_monitors!(nodes[0], 0); - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); 
let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure); nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[0].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[0], 0); @@ -1777,8 +1777,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } - *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); - let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); + let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); nodes[1].node.channel_monitor_updated(&outpoint, latest_update); check_added_monitors!(nodes[1], 0); @@ -1843,8 +1843,8 @@ fn test_path_paused_mpp() { // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second // (for the path 0 -> 2 -> 3) fails. - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); - *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); + *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure)); // Now check that we get the right return value, indicating that the first path succeeded but // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as @@ -1855,7 +1855,7 @@ fn test_path_paused_mpp() { if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); } } else { panic!(); } check_added_monitors!(nodes[0], 2); - *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(()); + *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(()); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1864,7 +1864,7 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. 
-	let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
+	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
 	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);

diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index 4af7a5f62..3f3f06f40 100644
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -203,7 +203,7 @@ impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
 /// [`chain::Watch`]: ../../chain/trait.Watch.html
 ///
 /// (C-not exported) due to an unconstrained generic in `Key`
-pub struct SimpleManyChannelMonitor
+pub struct ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
@@ -278,7 +278,7 @@ impl WatchEventQueue {
 }

 impl
-	ChainListener for SimpleManyChannelMonitor
+	ChainListener for ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
@@ -308,23 +308,21 @@
-impl SimpleManyChannelMonitor
+impl ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
 {
 	/// Creates a new object which can be used to monitor several channels given the chain
 	/// interface with which to register to receive notifications.
-	pub fn new(broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor {
-		let res = SimpleManyChannelMonitor {
+	pub fn new(broadcaster: T, logger: L, feeest: F) -> ChainMonitor {
+		Self {
 			monitors: Mutex::new(HashMap::new()),
 			watch_events: Mutex::new(WatchEventQueue::new()),
 			broadcaster,
 			logger,
 			fee_estimator: feeest,
-		};
-
-		res
+		}
 	}

 	/// Adds or updates the monitor which monitors the channel referred to by the given key.
@@ -363,7 +361,7 @@
-impl chain::Watch for SimpleManyChannelMonitor
+impl chain::Watch for ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
@@ -393,7 +391,7 @@
-impl events::EventsProvider for SimpleManyChannelMonitor
+impl events::EventsProvider for ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
@@ -407,7 +405,7 @@
-impl chain::WatchEventProvider for SimpleManyChannelMonitor
+impl chain::WatchEventProvider for ChainMonitor
 	where T::Target: BroadcasterInterface,
 	      F::Target: FeeEstimator,
 	      L::Target: Logger,
@@ -1474,7 +1472,7 @@ impl ChannelMonitor {
 	/// Gets the list of pending events which were generated by previous actions, clearing the list
 	/// in the process.
 	///
-	/// This is called by SimpleManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
+	/// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
 	/// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
 	/// no internal locking in ChannelMonitors.
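 	/// A minimal polling sketch (illustrative, not part of this patch): callers normally
 	/// reach this through the ChainMonitor's EventsProvider implementation, which wraps
 	/// this method, e.g.
 	///
 	///     for event in chain_monitor.get_and_clear_pending_events() {
 	///         // handle the event
 	///     }
 	///
 	/// where `chain_monitor` is an assumed ChainMonitor value owned by the caller.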
pub fn get_and_clear_pending_events(&mut self) -> Vec { diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c211829cf..962f8001f 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -23,7 +23,7 @@ use ln::msgs; use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler}; use util::enforcing_trait_impls::EnforcingChannelKeys; use util::test_utils; -use util::test_utils::TestChannelMonitor; +use util::test_utils::TestChainMonitor; use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; use util::errors::APIError; use util::config::UserConfig; @@ -85,14 +85,14 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, use chain::WatchEventProvider; use chain::chaininterface::ChainListener; - let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events(); + let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events(); process_chain_watch_events(&watch_events); let txdata: Vec<_> = block.txdata.iter().enumerate().collect(); loop { - node.chan_monitor.simple_monitor.block_connected(&block.header, &txdata, height); + node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height); - let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events(); + let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events(); process_chain_watch_events(&watch_events); if watch_events.is_empty() { @@ -120,7 +120,7 @@ pub struct NodeCfg<'a> { pub chain_source: &'a test_utils::TestChainSource, pub tx_broadcaster: &'a test_utils::TestBroadcaster, pub fee_estimator: &'a test_utils::TestFeeEstimator, - pub chan_monitor: test_utils::TestChannelMonitor<'a>, + pub chain_monitor: test_utils::TestChainMonitor<'a>, pub keys_manager: test_utils::TestKeysInterface, pub logger: &'a test_utils::TestLogger, pub node_seed: [u8; 32], @@ -130,9 +130,9 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> { pub block_notifier: chaininterface::BlockNotifierRef<'a>, pub chain_source: &'c test_utils::TestChainSource, pub tx_broadcaster: &'c test_utils::TestBroadcaster, - pub chan_monitor: &'b test_utils::TestChannelMonitor<'c>, + pub chain_monitor: &'b test_utils::TestChainMonitor<'c>, pub keys_manager: &'b test_utils::TestKeysInterface, - pub node: &'a ChannelManager, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>, + pub node: &'a ChannelManager, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>, pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>, pub node_seed: [u8; 32], pub network_payment_count: Rc>, @@ -146,7 +146,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { // Check that we processed all pending events assert!(self.node.get_and_clear_pending_msg_events().is_empty()); assert!(self.node.get_and_clear_pending_events().is_empty()); - assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty()); + assert!(self.chain_monitor.added_monitors.lock().unwrap().is_empty()); // Check that if we serialize the Router, we can deserialize it again. 
{ @@ -186,7 +186,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 }; let mut deserialized_monitors = Vec::new(); { - let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap(); + let old_monitors = self.chain_monitor.chain_monitor.monitors.lock().unwrap(); for (_, old_monitor) in old_monitors.iter() { let mut w = test_utils::TestVecWriter(Vec::new()); old_monitor.write_for_disk(&mut w).unwrap(); @@ -206,18 +206,18 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let mut w = test_utils::TestVecWriter(Vec::new()); self.node.write(&mut w).unwrap(); - <(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs { + <(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager: self.keys_manager, fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 }, - chain_monitor: self.chan_monitor, + chain_monitor: self.chain_monitor, tx_broadcaster: self.tx_broadcaster.clone(), logger: &test_utils::TestLogger::new(), channel_monitors, }).unwrap(); } - let channel_monitor = test_utils::TestChannelMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest); + let channel_monitor = test_utils::TestChainMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest); for deserialized_monitor in deserialized_monitors.drain(..) { if let Err(_) = channel_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) { panic!(); @@ -309,7 +309,7 @@ macro_rules! get_feerate { macro_rules! get_local_commitment_txn { ($node: expr, $channel_id: expr) => { { - let mut monitors = $node.chan_monitor.simple_monitor.monitors.lock().unwrap(); + let mut monitors = $node.chain_monitor.chain_monitor.monitors.lock().unwrap(); let mut commitment_txn = None; for (funding_txo, monitor) in monitors.iter_mut() { if funding_txo.to_channel_id() == $channel_id { @@ -347,7 +347,7 @@ macro_rules! unwrap_send_err { macro_rules! 
check_added_monitors { ($node: expr, $count: expr) => { { - let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), $count); added_monitors.clear(); } @@ -386,7 +386,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, ' node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); { - let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -394,7 +394,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, ' node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())); { - let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -1122,39 +1122,39 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec(node_count: usize, cfgs: &'a Vec>, node_config: &[Option]) -> Vec, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> { +pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec>, node_config: &[Option]) -> Vec, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> { let mut chanmgrs = Vec::new(); for i in 0..node_count { let mut default_config = UserConfig::default(); default_config.channel_options.announced_channel = true; default_config.peer_channel_config_limits.force_announced_channel_preference = false; default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization being done by the sender, to exerce receiver logic we need to lift of limit - let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0); + let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0); chanmgrs.push(node); } chanmgrs } -pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec>, chan_mgrs: &'a Vec, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec> { +pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec>, chan_mgrs: &'a Vec, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec> { let mut nodes = Vec::new(); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); for i in 0..node_count { let block_notifier = chaininterface::BlockNotifier::new(); - 
block_notifier.register_listener(&cfgs[i].chan_monitor.simple_monitor as &chaininterface::ChainListener); + block_notifier.register_listener(&cfgs[i].chain_monitor.chain_monitor as &chaininterface::ChainListener); block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener); let net_graph_msg_handler = NetGraphMsgHandler::new(None, cfgs[i].logger); nodes.push(Node{ chain_source: cfgs[i].chain_source, block_notifier, - tx_broadcaster: cfgs[i].tx_broadcaster, chan_monitor: &cfgs[i].chan_monitor, + tx_broadcaster: cfgs[i].tx_broadcaster, chain_monitor: &cfgs[i].chain_monitor, keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler, node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(), network_payment_count: payment_count.clone(), logger: cfgs[i].logger, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2635eb7ba..08e8187ad 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -476,7 +476,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 4 { return; } nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); { - let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -486,7 +486,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 5 { return; } nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); { - let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -660,7 +660,7 @@ fn test_update_fee_with_fundee_update_add_htlc() { // nothing happens since node[1] is in AwaitingRemoteRevoke nodes[1].node.send_payment(&route, our_payment_hash, &None).unwrap(); { - let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 0); added_monitors.clear(); } @@ -2847,14 +2847,14 @@ fn test_htlc_on_chain_success() { // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward connect_block(&nodes[1], &Block { header, txdata: node_txn}, 1); { - let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0.txid, chan_2.3.txid()); added_monitors.clear(); } let events = nodes[1].node.get_and_clear_pending_msg_events(); { - let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 2); assert_eq!(added_monitors[0].0.txid, chan_1.3.txid()); assert_eq!(added_monitors[1].0.txid, chan_1.3.txid()); @@ -3522,7 +3522,7 @@ fn test_force_close_fail_back() { // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ - let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap(); + let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap(); monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap() .provide_payment_preimage(&our_payment_hash, &our_payment_preimage); } @@ -4310,9 +4310,9 @@ fn test_no_txn_manager_serialize_deserialize() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let logger: test_utils::TestLogger; let fee_estimator: test_utils::TestFeeEstimator; - let new_chan_monitor: test_utils::TestChannelMonitor; + let new_chain_monitor: test_utils::TestChainMonitor; let keys_manager: test_utils::TestKeysInterface; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known()); @@ -4321,12 +4321,12 @@ fn test_no_txn_manager_serialize_deserialize() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); logger = test_utils::TestLogger::new(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; - new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator); - nodes[0].chan_monitor = &new_chan_monitor; + new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator); + nodes[0].chain_monitor = &new_chain_monitor; let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read(&mut chan_0_monitor_read).unwrap(); assert!(chan_0_monitor_read.is_empty()); @@ -4337,11 +4337,11 @@ fn test_no_txn_manager_serialize_deserialize() { let (_, nodes_0_deserialized_tmp) = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: config, keys_manager: &keys_manager, fee_estimator: &fee_estimator, - chain_monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors, @@ -4350,7 +4350,7 @@ fn test_no_txn_manager_serialize_deserialize() { nodes_0_deserialized = nodes_0_deserialized_tmp; assert!(nodes_0_read.is_empty()); - assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); + assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok()); nodes[0].node = &nodes_0_deserialized; nodes[0].block_notifier.register_listener(nodes[0].node); assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -4385,9 +4385,9 @@ fn test_manager_serialize_deserialize_events() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let fee_estimator: test_utils::TestFeeEstimator; let logger: 
test_utils::TestLogger; - let new_chan_monitor: test_utils::TestChannelMonitor; + let new_chain_monitor: test_utils::TestChainMonitor; let keys_manager: test_utils::TestKeysInterface; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe @@ -4408,7 +4408,7 @@ fn test_manager_serialize_deserialize_events() { node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())); { - let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -4416,7 +4416,7 @@ fn test_manager_serialize_deserialize_events() { node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())); { - let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap(); + let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, funding_output); added_monitors.clear(); @@ -4429,12 +4429,12 @@ fn test_manager_serialize_deserialize_events() { // Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; logger = test_utils::TestLogger::new(); - new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator); - nodes[0].chan_monitor = &new_chan_monitor; + new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator); + nodes[0].chain_monitor = &new_chain_monitor; let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read(&mut chan_0_monitor_read).unwrap(); assert!(chan_0_monitor_read.is_empty()); @@ -4445,11 +4445,11 @@ fn test_manager_serialize_deserialize_events() { let (_, nodes_0_deserialized_tmp) = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: config, keys_manager: &keys_manager, fee_estimator: &fee_estimator, - chain_monitor: nodes[0].chan_monitor, + chain_monitor: nodes[0].chain_monitor, tx_broadcaster: nodes[0].tx_broadcaster.clone(), logger: &logger, channel_monitors, @@ -4460,7 +4460,7 @@ fn test_manager_serialize_deserialize_events() { nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); - 
-	assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+	assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
 	nodes[0].node = &nodes_0_deserialized;
 	// After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager
@@ -4507,9 +4507,9 @@ fn test_simple_manager_serialize_deserialize() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let logger: test_utils::TestLogger;
 	let fee_estimator: test_utils::TestFeeEstimator;
-	let new_chan_monitor: test_utils::TestChannelMonitor;
+	let new_chain_monitor: test_utils::TestChainMonitor;
 	let keys_manager: test_utils::TestKeysInterface;
-	let nodes_0_deserialized: ChannelManager;
+	let nodes_0_deserialized: ChannelManager;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
@@ -4520,12 +4520,12 @@ fn test_simple_manager_serialize_deserialize() {
 	let nodes_0_serialized = nodes[0].node.encode();
 	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-	nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+	nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 	logger = test_utils::TestLogger::new();
 	fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-	new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-	nodes[0].chan_monitor = &new_chan_monitor;
+	new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+	nodes[0].chain_monitor = &new_chain_monitor;
 	let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
 	let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read(&mut chan_0_monitor_read).unwrap();
 	assert!(chan_0_monitor_read.is_empty());
@@ -4535,11 +4535,11 @@ fn test_simple_manager_serialize_deserialize() {
 	let (_, nodes_0_deserialized_tmp) = {
 		let mut channel_monitors = HashMap::new();
 		channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 			default_config: UserConfig::default(),
 			keys_manager: &keys_manager,
 			fee_estimator: &fee_estimator,
-			chain_monitor: nodes[0].chan_monitor,
+			chain_monitor: nodes[0].chain_monitor,
 			tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 			logger: &logger,
 			channel_monitors,
@@ -4548,7 +4548,7 @@ fn test_simple_manager_serialize_deserialize() {
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());
-	assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+	assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
 	nodes[0].node = &nodes_0_deserialized;
 	check_added_monitors!(nodes[0], 1);
@@ -4566,16 +4566,16 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 	let logger: test_utils::TestLogger;
 	let fee_estimator: test_utils::TestFeeEstimator;
-	let new_chan_monitor: test_utils::TestChannelMonitor;
+	let new_chain_monitor: test_utils::TestChainMonitor;
 	let keys_manager: test_utils::TestKeysInterface;
-	let nodes_0_deserialized: ChannelManager;
+	let nodes_0_deserialized: ChannelManager;
 	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 	create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
 	let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
 	let mut node_0_stale_monitors_serialized = Vec::new();
-	for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+	for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
 		let mut writer = test_utils::TestVecWriter(Vec::new());
 		monitor.1.write_for_disk(&mut writer).unwrap();
 		node_0_stale_monitors_serialized.push(writer.0);
@@ -4594,7 +4594,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
 	// nodes[3])
 	let mut node_0_monitors_serialized = Vec::new();
-	for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+	for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
 		let mut writer = test_utils::TestVecWriter(Vec::new());
 		monitor.1.write_for_disk(&mut writer).unwrap();
 		node_0_monitors_serialized.push(writer.0);
@@ -4602,8 +4602,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	logger = test_utils::TestLogger::new();
 	fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-	new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-	nodes[0].chan_monitor = &new_chan_monitor;
+	new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+	nodes[0].chain_monitor = &new_chain_monitor;
 	let mut node_0_stale_monitors = Vec::new();
 	for serialized in node_0_stale_monitors_serialized.iter() {
@@ -4625,11 +4625,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	let mut nodes_0_read = &nodes_0_serialized[..];
 	if let Err(msgs::DecodeError::InvalidValue) =
-		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 		default_config: UserConfig::default(),
 		keys_manager: &keys_manager,
 		fee_estimator: &fee_estimator,
-		chain_monitor: nodes[0].chan_monitor,
+		chain_monitor: nodes[0].chain_monitor,
 		tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 		logger: &logger,
 		channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4639,11 +4639,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	let mut nodes_0_read = &nodes_0_serialized[..];
 	let (_, nodes_0_deserialized_tmp) =
-		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+		<(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
 		default_config: UserConfig::default(),
 		keys_manager: &keys_manager,
 		fee_estimator: &fee_estimator,
-		chain_monitor: nodes[0].chan_monitor,
+		chain_monitor: nodes[0].chain_monitor,
 		tx_broadcaster: nodes[0].tx_broadcaster.clone(),
 		logger: &logger,
 		channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4659,7 +4659,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	}
 	for monitor in node_0_monitors.drain(..) {
-		assert!(nodes[0].chan_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+		assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
 		check_added_monitors!(nodes[0], 1);
 	}
 	nodes[0].node = &nodes_0_deserialized;
@@ -4689,7 +4689,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 macro_rules! check_spendable_outputs {
 	($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => {
 		{
-			let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
+			let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
 			let mut txn = Vec::new();
 			for event in events {
 				match event {
@@ -5748,8 +5748,8 @@ fn test_key_derivation_params() {
 	// We manually create the node configuration to back up the seed.
 	let seed = [42; 32];
 	let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-	let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
-	let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chan_monitor, keys_manager, node_seed: seed };
+	let chain_monitor = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
+	let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager, node_seed: seed };
 	let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	node_cfgs.remove(0);
 	node_cfgs.insert(0, node);
@@ -7206,7 +7206,7 @@ fn test_no_failure_dust_htlc_local_commitment() {
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	nodes[0].chan_monitor.simple_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
+	nodes[0].chain_monitor.chain_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
 	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
 	// We broadcast a few more blocks to check everything is all right
@@ -7483,8 +7483,8 @@ fn test_data_loss_protect() {
 	// Cache node A state before any channel update
 	let previous_node_state = nodes[0].node.encode();
-	let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
-	nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+	let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
+	nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chain_monitor_state).unwrap();
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
@@ -7494,16 +7494,16 @@ fn test_data_loss_protect() {
 	// Restore node A from previous state
 	logger = test_utils::TestLogger::with_id(format!("node {}", 0));
-	let mut chan_monitor = <(BlockHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0)).unwrap().1;
+	let mut chain_monitor = <(BlockHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
 	chain_source = test_utils::TestChainSource::new(Network::Testnet);
 	tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
 	fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 	keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
-	monitor = test_utils::TestChannelMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
+	monitor = test_utils::TestChainMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
 	node_state_0 = {
 		let mut channel_monitors = HashMap::new();
-		channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chan_monitor);
-		<(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+		channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
+		<(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
 			keys_manager: &keys_manager,
 			fee_estimator: &fee_estimator,
 			chain_monitor: &monitor,
@@ -7514,12 +7514,12 @@ fn test_data_loss_protect() {
 		}).unwrap().1
 	};
 	nodes[0].node = &node_state_0;
-	assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
-	nodes[0].chan_monitor = &monitor;
+	assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+	nodes[0].chain_monitor = &monitor;
 	nodes[0].chain_source = &chain_source;
 	nodes[0].block_notifier = BlockNotifier::new();
-	nodes[0].block_notifier.register_listener(&nodes[0].chan_monitor.simple_monitor);
+	nodes[0].block_notifier.register_listener(&nodes[0].chain_monitor.chain_monitor);
 	nodes[0].block_notifier.register_listener(nodes[0].node);
 	check_added_monitors!(nodes[0], 1);
@@ -8232,7 +8232,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
 	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
 	connect_blocks(&nodes[0], 5, 130, false, header_130.block_hash());
 	{
-		let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
 		if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
 			assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
 			assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
@@ -8361,22 +8361,22 @@ fn test_update_err_monitor_lockdown() {
 	// Route a HTLC from node 0 to node 1 (but don't settle)
 	let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
-	// Copy SimpleManyChannelMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor times out the HTLC onchain
+	// Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor times out the HTLC onchain
 	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 	let watchtower = {
-		let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write_for_disk(&mut w).unwrap();
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
 			&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
 		assert!(new_monitor == *monitor);
-		let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+		let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
 		assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	watchtower.simple_monitor.block_connected(&header, &[], 200);
+	watchtower.chain_monitor.block_connected(&header, &[], 200);
 	// Try to update ChannelMonitor
 	assert!(nodes[1].node.claim_funds(preimage, &None, 9_000_000));
@@ -8386,8 +8386,8 @@ fn test_update_err_monitor_lockdown() {
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 	if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
 		if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
-			if let Err(_) = watchtower.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-			if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+			if let Err(_) = watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+			if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
 		} else { assert!(false); }
 	} else { assert!(false); };
 	// Our local monitor is in-sync and hasn't yet processed the timeout
@@ -8418,22 +8418,22 @@ fn test_concurrent_monitor_claim() {
 	// Route a HTLC from node 0 to node 1 (but don't settle)
 	route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
-	// Copy SimpleManyChannelMonitor to simulate watchtower Alice and update block height until her ChannelMonitor times out the HTLC onchain
+	// Copy ChainMonitor to simulate watchtower Alice and update block height until her ChannelMonitor times out the HTLC onchain
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
 	let watchtower_alice = {
-		let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write_for_disk(&mut w).unwrap();
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
 			&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
 		assert!(new_monitor == *monitor);
-		let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+		let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
 		assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	watchtower_alice.simple_monitor.block_connected(&header, &vec![], 135);
+	watchtower_alice.chain_monitor.block_connected(&header, &vec![], 135);
 	// Watchtower Alice should have broadcast a commitment/HTLC-timeout
 	{
 		let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(txn.len(), 2);
 		txn.clear();
 	}
@@ -8442,22 +8442,22 @@ fn test_concurrent_monitor_claim() {
-	// Copy SimpleManyChannelMonitor to simulate watchtower Bob and make it receive a commitment update first.
+	// Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
 	let watchtower_bob = {
-		let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write_for_disk(&mut w).unwrap();
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
 			&mut ::std::io::Cursor::new(&w.0)).unwrap().1;
 		assert!(new_monitor == *monitor);
-		let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+		let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
 		assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
 		watchtower
 	};
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	watchtower_bob.simple_monitor.block_connected(&header, &vec![], 134);
+	watchtower_bob.chain_monitor.block_connected(&header, &vec![], 134);
 	// Route another payment to generate another update with the previous HTLC still pending
 	let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -8474,16 +8474,16 @@ fn test_concurrent_monitor_claim() {
 	if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
 		if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
 			// Watchtower Alice should already have seen the block and so will reject the update
-			if let Err(_) = watchtower_alice.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-			if let Ok(_) = watchtower_bob.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-			if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+			if let Err(_) = watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+			if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+			if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
 		} else { assert!(false); }
 	} else { assert!(false); };
 	// Our local monitor is in-sync and hasn't yet processed the timeout
 	check_added_monitors!(nodes[0], 1);
 	//// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-	watchtower_bob.simple_monitor.block_connected(&header, &vec![], 135);
+	watchtower_bob.chain_monitor.block_connected(&header, &vec![], 135);
 	// Watchtower Bob should have broadcast a commitment/HTLC-timeout
 	let bob_state_y;
 	{
 		let mut txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(txn.len(), 2);
 		bob_state_y = txn[0].clone();
 		txn.clear();
 	};
 	// We confirm Bob's state Y on Alice, she should broadcast an HTLC-timeout
-	watchtower_alice.simple_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
+	watchtower_alice.chain_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
 	{
 		let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		// We broadcast the transaction twice, once due to the HTLC-timeout, once due
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 295636e33..e505774cc 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -59,27 +59,27 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
 	}
 }
-pub struct TestChannelMonitor<'a> {
+pub struct TestChainMonitor<'a> {
 	pub added_monitors: Mutex)>>,
 	pub latest_monitor_update_id: Mutex>,
-	pub simple_monitor: channelmonitor::SimpleManyChannelMonitor,
+	pub chain_monitor: channelmonitor::ChainMonitor,
 	pub update_ret: Mutex>,
 	// If this is set to Some(), after the next return, we'll always return this until update_ret
 	// is changed:
 	pub next_update_ret: Mutex>>,
 }
-impl<'a> TestChannelMonitor<'a> {
+impl<'a> TestChainMonitor<'a> {
 	pub fn new(broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator) -> Self {
 		Self {
 			added_monitors: Mutex::new(Vec::new()),
 			latest_monitor_update_id: Mutex::new(HashMap::new()),
-			simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger, fee_estimator),
+			chain_monitor: channelmonitor::ChainMonitor::new(broadcaster, logger, fee_estimator),
 			update_ret: Mutex::new(Ok(())),
 			next_update_ret: Mutex::new(None),
 		}
 	}
 }
-impl<'a> chain::Watch for TestChannelMonitor<'a> {
+impl<'a> chain::Watch for TestChainMonitor<'a> {
 	type Keys = EnforcingChannelKeys;
 	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
@@ -92,7 +92,7 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
 		assert!(new_monitor == monitor);
 		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
 		self.added_monitors.lock().unwrap().push((funding_txo, monitor));
-		assert!(self.simple_monitor.watch_channel(funding_txo, new_monitor).is_ok());
+		assert!(self.chain_monitor.watch_channel(funding_txo, new_monitor).is_ok());
 		let ret = self.update_ret.lock().unwrap().clone();
 		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
@@ -109,10 +109,10 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
 			&mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
 		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
-		assert!(self.simple_monitor.update_channel(funding_txo, update).is_ok());
+		assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
 		// At every point where we get a monitor update, we should be able to send a useful monitor
 		// to a watchtower and to disk...
-		let monitors = self.simple_monitor.monitors.lock().unwrap();
+		let monitors = self.chain_monitor.monitors.lock().unwrap();
 		let monitor = monitors.get(&funding_txo).unwrap();
 		w.0.clear();
 		monitor.write_for_disk(&mut w).unwrap();
@@ -129,7 +129,7 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
 	}
 	fn release_pending_monitor_events(&self) -> Vec {
-		return self.simple_monitor.release_pending_monitor_events();
+		return self.chain_monitor.release_pending_monitor_events();
 	}
 }