Rename SimpleManyChannelMonitor to ChainMonitor
author    Jeffrey Czyz <jkczyz@gmail.com>
Tue, 21 Jul 2020 05:12:14 +0000 (22:12 -0700)
committer Jeffrey Czyz <jkczyz@gmail.com>
Thu, 1 Oct 2020 05:39:55 +0000 (22:39 -0700)
ManyChannelMonitor was renamed chain::Watch in the previous commit. Use
a more concise name for an implementation that monitors the chain for
channel activity. Future work will parameterize the struct to allow for
different varieties of persistence. Thus, users will usually be able to
use ChainMonitor directly rather than implementing a chain::Watch that
wraps it.
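
For illustration only (not part of the patch below): a minimal sketch of the
intended direct usage, based on the ChainMonitor::new(broadcaster, logger,
feeest) signature and the type parameters visible in this diff. The trait-object
broadcaster, fee-estimator, and logger aliases are placeholders for whatever
implementations a user supplies.

```rust
// Sketch only; assumes the lightning crate at this revision. The trait-object
// types mirror the aliases used in lightning-net-tokio's docs.
use std::sync::Arc;

use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::keysinterface::InMemoryChannelKeys;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmonitor::ChainMonitor;
use lightning::util::logger::Logger;

// ChainMonitor implements chain::Watch for OutPoint-keyed monitors, so it can
// be handed to ChannelManager directly rather than wrapped in a bespoke type.
type Monitor = ChainMonitor<OutPoint, InMemoryChannelKeys,
    Arc<dyn BroadcasterInterface>, Arc<dyn FeeEstimator>, Arc<dyn Logger>>;

fn new_chain_monitor(
    broadcaster: Arc<dyn BroadcasterInterface>,
    fee_estimator: Arc<dyn FeeEstimator>,
    logger: Arc<dyn Logger>,
) -> Monitor {
    // Constructor argument order as introduced by this commit:
    // (broadcaster, logger, feeest).
    ChainMonitor::new(broadcaster, logger, fee_estimator)
}
```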

ARCH.md
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning-net-tokio/src/lib.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channelmonitor.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/util/test_utils.rs

diff --git a/ARCH.md b/ARCH.md
index 1d987a3531457d78e92def84c536922a2a8c7ce5..b7276669a504ef3bfdd083cb82c9984676c2745b 100644 (file)
--- a/ARCH.md
+++ b/ARCH.md
@@ -39,24 +39,24 @@ At a high level, some of the common interfaces fit together as follows:
                      -----------------  | UserConfig |
          --------------------       |   --------------
   /------| MessageSendEvent |       |   |     ----------------
- |       --------------------       |   |     | FeeEstimator |
- |   (as MessageSendEventsProvider) |   |     ----------------
- |                         ^        |   |    /          |      ------------------------
- |                          \       |   |   /      ---------> | BroadcasterInterface |
- |                           \      |   |  /      /     |     ------------------------
- |                            \     v   v v      /      v        ^
- |    (as                      ------------------       ----------------
- |    ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch |
- v               /             ------------------       ----------------
---------------- /                          (as EventsProvider)
-| PeerManager |-                                \     /
----------------                                  \   /
- |                    -----------------           \ /
- |                    | chain::Access |            v
- |                    -----------------        ---------
- |                            |                | Event |
-(as RoutingMessageHandler)    v                ---------
-  \                   --------------------
-   -----------------> | NetGraphMsgHandler |
-                      --------------------
+ |       --------------------       |   |     | FeeEstimator | <-----------------------
+ |   (as MessageSendEventsProvider) |   |     ----------------                         \
+ |                         ^        |   |    /                 ------------------------ |
+ |                          \       |   |   /      ---------> | BroadcasterInterface |  |
+ |                           \      |   |  /      /           ------------------------  |
+ |                            \     v   v v      /                          ^           |
+ |    (as                      ------------------       ----------------    |           |
+ |    ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch |    |           |
+ v               /             ------------------       ----------------    |           |
+--------------- /                  (as EventsProvider)         ^            |           |
+| PeerManager |-                             \                 |            |           |
+---------------                               \                | (is-a)     |           |
+ |                    -----------------        \       _----------------   /           /
+ |                    | chain::Access |         \     / | ChainMonitor |---------------
+ |                    -----------------          \   /  ----------------
+ |                            |                   \ /
+(as RoutingMessageHandler)    v                    v
+  \                   --------------------     ---------
+   -----------------> | NetGraphMsgHandler |   | Event |
+                      --------------------     ---------
 ```
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 3ce6fa26fb1df0cb9545db7abacedb9bbf9fd172..332940b2f7a8278deb890eb5add202e81a3f6c19 100644 (file)
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -81,9 +81,9 @@ impl Writer for VecWriter {
        }
 }
 
-struct TestChannelMonitor {
+struct TestChainMonitor {
        pub logger: Arc<dyn Logger>,
-       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+       pub chain_monitor: Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
        // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
        // logic will automatically force-close our channels for us (as we don't have an up-to-date
@@ -93,10 +93,10 @@ struct TestChannelMonitor {
        pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
        pub should_update_manager: atomic::AtomicBool,
 }
-impl TestChannelMonitor {
+impl TestChainMonitor {
        pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
                Self {
-                       simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger.clone(), feeest)),
+                       chain_monitor: Arc::new(channelmonitor::ChainMonitor::new(broadcaster, logger.clone(), feeest)),
                        logger,
                        update_ret: Mutex::new(Ok(())),
                        latest_monitors: Mutex::new(HashMap::new()),
@@ -104,7 +104,7 @@ impl TestChannelMonitor {
                }
        }
 }
-impl chain::Watch for TestChannelMonitor {
+impl chain::Watch for TestChainMonitor {
        type Keys = EnforcingChannelKeys;
 
        fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
@@ -114,7 +114,7 @@ impl chain::Watch for TestChannelMonitor {
                        panic!("Already had monitor pre-watch_channel");
                }
                self.should_update_manager.store(true, atomic::Ordering::Relaxed);
-               assert!(self.simple_monitor.watch_channel(funding_txo, monitor).is_ok());
+               assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
                self.update_ret.lock().unwrap().clone()
        }
 
@@ -135,7 +135,7 @@ impl chain::Watch for TestChannelMonitor {
        }
 
        fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
-               return self.simple_monitor.release_pending_monitor_events();
+               return self.chain_monitor.release_pending_monitor_events();
        }
 }
 
@@ -191,7 +191,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        macro_rules! make_node {
                ($node_id: expr) => { {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-                       let monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
+                       let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
 
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
@@ -206,7 +206,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        macro_rules! reload_node {
                ($ser: expr, $node_id: expr, $old_monitors: expr) => { {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-                       let chain_monitor = Arc::new(TestChannelMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
+                       let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
 
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
@@ -235,7 +235,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                                channel_monitors: monitor_refs,
                        };
 
-                       (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
+                       (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
                } }
        }
 
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 61458e241c7725cea2baa53f938f1f000ea8cf40..8d1c6886562d8b2dc2d665414ec10da91e8550de 100644 (file)
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -145,13 +145,13 @@ impl<'a> std::hash::Hash for Peer<'a> {
 
 type ChannelMan = ChannelManager<
        EnforcingChannelKeys,
-       Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+       Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
 type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<NetGraphMsgHandler<Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan>,
-       monitor: Arc<channelmonitor::SimpleManyChannelMonitor<
+       monitor: Arc<channelmonitor::ChainMonitor<
                OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        handler: PeerMan<'a>,
 
@@ -166,7 +166,7 @@ struct MoneyLossDetector<'a> {
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
                   manager: Arc<ChannelMan>,
-                  monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
+                  monitor: Arc<channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
                   handler: PeerMan<'a>) -> Self {
                MoneyLossDetector {
                        manager,
@@ -334,7 +334,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        };
 
        let broadcast = Arc::new(TestBroadcaster{});
-       let monitor = Arc::new(channelmonitor::SimpleManyChannelMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
+       let monitor = Arc::new(channelmonitor::ChainMonitor::new(broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
 
        let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
        let mut config = UserConfig::default();
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index 0757ffd5859e19859edc9963ffc44a3eee73ab57..d307116484fc77b201f6315ad594898222ae7030 100644 (file)
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
 //! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
 //! type Logger = dyn lightning::util::logger::Logger;
 //! type ChainAccess = dyn lightning::chain::Access;
-//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>>;
-//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChannelMonitor, TxBroadcaster, FeeEstimator, Logger>;
-//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChannelMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>;
+//! type ChainMonitor = lightning::ln::channelmonitor::ChainMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>>;
+//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>;
+//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>;
 //!
 //! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
+//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
 //!     let (sender, mut receiver) = mpsc::channel(2);
 //!     lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
 //!     loop {
 //!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//!         for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
 //!     }
 //! }
 //!
 //! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
+//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
 //!     let (sender, mut receiver) = mpsc::channel(2);
 //!     lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
 //!     loop {
@@ -63,7 +63,7 @@
 //!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//!         for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
 //!     }
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index aa549de2382f436639932d7e8a22dad063428e49..e99dca78b21052c90dcf15bb7896e76756566286 100644 (file)
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -41,7 +41,7 @@ fn test_simple_monitor_permanent_update_fail() {
 
        let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
@@ -76,7 +76,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -95,8 +95,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -125,7 +125,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Now set it to failed again...
        let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
        {
-               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+               *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
@@ -191,7 +191,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // Now try to send a second payment which will fail to send
        let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
        {
-               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+               *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
@@ -245,8 +245,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        }
 
        // Now fix monitor updating...
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -532,15 +532,15 @@ fn test_monitor_update_fail_cs() {
        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        let responses = nodes[1].node.get_and_clear_pending_msg_events();
@@ -563,7 +563,7 @@ fn test_monitor_update_fail_cs() {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
-                       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+                       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -573,8 +573,8 @@ fn test_monitor_update_fail_cs() {
                _ => panic!("Unexpected event"),
        }
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -622,7 +622,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -630,8 +630,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
@@ -684,7 +684,7 @@ fn test_monitor_update_raa_while_paused() {
        check_added_monitors!(nodes[1], 1);
        let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -696,8 +696,8 @@ fn test_monitor_update_raa_while_paused() {
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -779,7 +779,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now fail monitor updating.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -797,7 +797,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                check_added_monitors!(nodes[0], 1);
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
        send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
@@ -858,8 +858,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Restore monitor updating, ensuring we immediately get a fail-back update and a
        // update_add update.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1020,7 +1020,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 
@@ -1049,8 +1049,8 @@ fn test_monitor_update_fail_reestablish() {
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1123,7 +1123,7 @@ fn raa_no_response_awaiting_raa_state() {
        // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
        // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
        // then restore channel monitor updates.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1135,8 +1135,8 @@ fn raa_no_response_awaiting_raa_state() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        // nodes[1] should be AwaitingRAA here!
        check_added_monitors!(nodes[1], 0);
@@ -1228,7 +1228,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1257,8 +1257,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now un-fail the monitor, which will result in B sending its original commitment update,
        // receiving the commitment update from A, and the resulting commitment dances.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1342,7 +1342,7 @@ fn monitor_failed_no_reestablish_response() {
                check_added_monitors!(nodes[0], 1);
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -1366,8 +1366,8 @@ fn monitor_failed_no_reestablish_response() {
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1445,7 +1445,7 @@ fn first_message_on_recv_ordering() {
        let payment_event = SendEvent::from_event(events.pop().unwrap());
        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        // Deliver the final RAA for the first payment, which does not require a response. RAAs
        // generally require a commitment_signed, so the fact that we're expecting an opposite response
@@ -1464,8 +1464,8 @@ fn first_message_on_recv_ordering() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1509,7 +1509,7 @@ fn test_monitor_update_fail_claim() {
 
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
        check_added_monitors!(nodes[1], 1);
 
@@ -1523,7 +1523,7 @@ fn test_monitor_update_fail_claim() {
 
        // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
        // paused, so forward shouldn't succeed until we call channel_monitor_updated().
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
 
        let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -1556,7 +1556,7 @@ fn test_monitor_update_fail_claim() {
        } else { panic!("Unexpected event!"); }
 
        // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1612,14 +1612,14 @@ fn test_monitor_update_on_pending_forwards() {
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1675,15 +1675,15 @@ fn monitor_update_claim_fail_no_response() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1728,20 +1728,20 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
        check_added_monitors!(nodes[0], 0);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -1777,8 +1777,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1843,8 +1843,8 @@ fn test_path_paused_mpp() {
 
        // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
        // (for the path 0 -> 2 -> 3) fails.
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
        // Now check that we get the right return value, indicating that the first path succeeded but
        // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
@@ -1855,7 +1855,7 @@ fn test_path_paused_mpp() {
                if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
        } else { panic!(); }
        check_added_monitors!(nodes[0], 2);
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
 
        // Pass the first HTLC of the payment along to nodes[3].
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -1864,7 +1864,7 @@ fn test_path_paused_mpp() {
 
        // And check that, after we successfully update the monitor for chan_2 we can pass the second
        // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
index 4af7a5f62de105625dc663f00c907c718a249ec4..3f3f06f4064d49908aa3083261d28d2495dc24ba 100644 (file)
--- a/lightning/src/ln/channelmonitor.rs
+++ b/lightning/src/ln/channelmonitor.rs
@@ -203,7 +203,7 @@ impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
 /// [`chain::Watch`]: ../../chain/trait.Watch.html
 ///
 /// (C-not exported) due to an unconstrained generic in `Key`
-pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
+pub struct ChainMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref>
        where T::Target: BroadcasterInterface,
         F::Target: FeeEstimator,
         L::Target: Logger,
@@ -278,7 +278,7 @@ impl WatchEventQueue {
 }
 
 impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
-       ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+       ChainListener for ChainMonitor<Key, ChanSigner, T, F, L>
        where T::Target: BroadcasterInterface,
              F::Target: FeeEstimator,
              L::Target: Logger,
@@ -308,23 +308,21 @@ impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync
        }
 }
 
-impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> ChainMonitor<Key, ChanSigner, T, F, L>
        where T::Target: BroadcasterInterface,
              F::Target: FeeEstimator,
              L::Target: Logger,
 {
        /// Creates a new object which can be used to monitor several channels given the chain
        /// interface with which to register to receive notifications.
-       pub fn new(broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L> {
-               let res = SimpleManyChannelMonitor {
+       pub fn new(broadcaster: T, logger: L, feeest: F) -> ChainMonitor<Key, ChanSigner, T, F, L> {
+               Self {
                        monitors: Mutex::new(HashMap::new()),
                        watch_events: Mutex::new(WatchEventQueue::new()),
                        broadcaster,
                        logger,
                        fee_estimator: feeest,
-               };
-
-               res
+               }
        }
 
        /// Adds or updates the monitor which monitors the channel referred to by the given key.
@@ -363,7 +361,7 @@ impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: De
        }
 }
 
-impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F, L>
+impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for ChainMonitor<OutPoint, ChanSigner, T, F, L>
        where T::Target: BroadcasterInterface,
              F::Target: FeeEstimator,
              L::Target: Logger,
@@ -393,7 +391,7 @@ impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L:
        }
 }
 
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> events::EventsProvider for ChainMonitor<Key, ChanSigner, T, F, L>
        where T::Target: BroadcasterInterface,
              F::Target: FeeEstimator,
              L::Target: Logger,
@@ -407,7 +405,7 @@ impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: De
        }
 }
 
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> chain::WatchEventProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L>
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref> chain::WatchEventProvider for ChainMonitor<Key, ChanSigner, T, F, L>
        where T::Target: BroadcasterInterface,
              F::Target: FeeEstimator,
              L::Target: Logger,
@@ -1474,7 +1472,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// Gets the list of pending events which were generated by previous actions, clearing the list
        /// in the process.
        ///
-       /// This is called by SimpleManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
+       /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
        /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
        /// no internal locking in ChannelMonitors.
        pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index c211829cf65abaa90c9edf8b97a295302dbc2c08..962f8001f4359173cfd840999a9f4ac4a56c7d93 100644 (file)
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -23,7 +23,7 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::test_utils;
-use util::test_utils::TestChannelMonitor;
+use util::test_utils::TestChainMonitor;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
 use util::config::UserConfig;
@@ -85,14 +85,14 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block,
        use chain::WatchEventProvider;
        use chain::chaininterface::ChainListener;
 
-       let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events();
+       let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events();
        process_chain_watch_events(&watch_events);
 
        let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
        loop {
-               node.chan_monitor.simple_monitor.block_connected(&block.header, &txdata, height);
+               node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
 
-               let watch_events = node.chan_monitor.simple_monitor.release_pending_watch_events();
+               let watch_events = node.chain_monitor.chain_monitor.release_pending_watch_events();
                process_chain_watch_events(&watch_events);
 
                if watch_events.is_empty() {
@@ -120,7 +120,7 @@ pub struct NodeCfg<'a> {
        pub chain_source: &'a test_utils::TestChainSource,
        pub tx_broadcaster: &'a test_utils::TestBroadcaster,
        pub fee_estimator: &'a test_utils::TestFeeEstimator,
-       pub chan_monitor: test_utils::TestChannelMonitor<'a>,
+       pub chain_monitor: test_utils::TestChainMonitor<'a>,
        pub keys_manager: test_utils::TestKeysInterface,
        pub logger: &'a test_utils::TestLogger,
        pub node_seed: [u8; 32],
@@ -130,9 +130,9 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> {
        pub block_notifier: chaininterface::BlockNotifierRef<'a>,
        pub chain_source: &'c test_utils::TestChainSource,
        pub tx_broadcaster: &'c test_utils::TestBroadcaster,
-       pub chan_monitor: &'b test_utils::TestChannelMonitor<'c>,
+       pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
        pub keys_manager: &'b test_utils::TestKeysInterface,
-       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
+       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
        pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>,
        pub node_seed: [u8; 32],
        pub network_payment_count: Rc<RefCell<u8>>,
@@ -146,7 +146,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        // Check that we processed all pending events
                        assert!(self.node.get_and_clear_pending_msg_events().is_empty());
                        assert!(self.node.get_and_clear_pending_events().is_empty());
-                       assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
+                       assert!(self.chain_monitor.added_monitors.lock().unwrap().is_empty());
 
                        // Check that if we serialize the Router, we can deserialize it again.
                        {
@@ -186,7 +186,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
                        let mut deserialized_monitors = Vec::new();
                        {
-                               let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                               let old_monitors = self.chain_monitor.chain_monitor.monitors.lock().unwrap();
                                for (_, old_monitor) in old_monitors.iter() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        old_monitor.write_for_disk(&mut w).unwrap();
@@ -206,18 +206,18 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
-                               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+                               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
                                        default_config: UserConfig::default(),
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 },
-                                       chain_monitor: self.chan_monitor,
+                                       chain_monitor: self.chain_monitor,
                                        tx_broadcaster: self.tx_broadcaster.clone(),
                                        logger: &test_utils::TestLogger::new(),
                                        channel_monitors,
                                }).unwrap();
                        }
 
-                       let channel_monitor = test_utils::TestChannelMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest);
+                       let channel_monitor = test_utils::TestChainMonitor::new(self.tx_broadcaster.clone(), &self.logger, &feeest);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
                                if let Err(_) = channel_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
                                        panic!();
@@ -309,7 +309,7 @@ macro_rules! get_feerate {
 macro_rules! get_local_commitment_txn {
        ($node: expr, $channel_id: expr) => {
                {
-                       let mut monitors = $node.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                       let mut monitors = $node.chain_monitor.chain_monitor.monitors.lock().unwrap();
                        let mut commitment_txn = None;
                        for (funding_txo, monitor) in monitors.iter_mut() {
                                if funding_txo.to_channel_id() == $channel_id {
@@ -347,7 +347,7 @@ macro_rules! unwrap_send_err {
 macro_rules! check_added_monitors {
        ($node: expr, $count: expr) => {
                {
-                       let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                       let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap();
                        assert_eq!(added_monitors.len(), $count);
                        added_monitors.clear();
                }
@@ -386,7 +386,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
        {
-               let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -394,7 +394,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 
        node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
        {
-               let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -1122,39 +1122,39 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMon
        for i in 0..node_count {
                let seed = [i as u8; 32];
                let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-               let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
-               nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chan_monitor, keys_manager, node_seed: seed });
+               let chain_monitor = test_utils::TestChainMonitor::new(&chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
+               nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chain_monitor, keys_manager, node_seed: seed });
        }
 
        nodes
 }
 
-pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
+pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChainMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
        let mut chanmgrs = Vec::new();
        for i in 0..node_count {
                let mut default_config = UserConfig::default();
                default_config.channel_options.announced_channel = true;
                default_config.peer_channel_config_limits.force_announced_channel_preference = false;
                default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization is done by the sender, so to exercise the receiver logic we need to lift the limit
-               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
+               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
                chanmgrs.push(node);
        }
 
        chanmgrs
 }
 
-pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
+pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
        let payment_count = Rc::new(RefCell::new(0));
 
        for i in 0..node_count {
                let block_notifier = chaininterface::BlockNotifier::new();
-               block_notifier.register_listener(&cfgs[i].chan_monitor.simple_monitor as &chaininterface::ChainListener);
+               block_notifier.register_listener(&cfgs[i].chain_monitor.chain_monitor as &chaininterface::ChainListener);
                block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener);
                let net_graph_msg_handler = NetGraphMsgHandler::new(None, cfgs[i].logger);
                nodes.push(Node{ chain_source: cfgs[i].chain_source, block_notifier,
-                                tx_broadcaster: cfgs[i].tx_broadcaster, chan_monitor: &cfgs[i].chan_monitor,
+                                tx_broadcaster: cfgs[i].tx_broadcaster, chain_monitor: &cfgs[i].chain_monitor,
                                 keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
                                 node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
                                 network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
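
A condensed sketch (not part of the diff) of the per-node wiring these hunks establish, using only the calls visible above; `cfg` is a hypothetical stand-in for one NodeCfg entry:

    let chain_monitor = test_utils::TestChainMonitor::new(cfg.tx_broadcaster, cfg.logger, cfg.fee_estimator);
    let manager = ChannelManager::new(Network::Testnet, cfg.fee_estimator, &chain_monitor,
                                      cfg.tx_broadcaster, cfg.logger.clone(), &cfg.keys_manager,
                                      UserConfig::default(), 0);
    // The inner ChainMonitor (not the test wrapper) is what listens for connected blocks.
    let block_notifier = chaininterface::BlockNotifier::new();
    block_notifier.register_listener(&chain_monitor.chain_monitor as &chaininterface::ChainListener);
    block_notifier.register_listener(&manager as &chaininterface::ChainListener);
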
index 2635eb7ba6c83da106480980198dfe048281da7e..08e8187ad9fe4ebf78bcaedb75da357440f2dcbf 100644 (file)
@@ -476,7 +476,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        if steps & 0x0f == 4 { return; }
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -486,7 +486,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        if steps & 0x0f == 5 { return; }
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
        {
-               let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -660,7 +660,7 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        // nothing happens since node[1] is in AwaitingRemoteRevoke
        nodes[1].node.send_payment(&route, our_payment_hash, &None).unwrap();
        {
-               let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 0);
                added_monitors.clear();
        }
@@ -2847,14 +2847,14 @@ fn test_htlc_on_chain_success() {
        // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
        connect_block(&nodes[1], &Block { header, txdata: node_txn}, 1);
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
                added_monitors.clear();
        }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 2);
                assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
                assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
@@ -3522,7 +3522,7 @@ fn test_force_close_fail_back() {
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
-               let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
                monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
        }
@@ -4310,9 +4310,9 @@ fn test_no_txn_manager_serialize_deserialize() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
@@ -4321,12 +4321,12 @@ fn test_no_txn_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4337,11 +4337,11 @@ fn test_no_txn_manager_serialize_deserialize() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: config,
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       chain_monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4350,7 +4350,7 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
        nodes[0].block_notifier.register_listener(nodes[0].node);
        assert_eq!(nodes[0].node.list_channels().len(), 1);
@@ -4385,9 +4385,9 @@ fn test_manager_serialize_deserialize_events() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let fee_estimator: test_utils::TestFeeEstimator;
        let logger: test_utils::TestLogger;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe
@@ -4408,7 +4408,7 @@ fn test_manager_serialize_deserialize_events() {
 
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
        {
-               let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -4416,7 +4416,7 @@ fn test_manager_serialize_deserialize_events() {
 
        node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
        {
-               let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -4429,12 +4429,12 @@ fn test_manager_serialize_deserialize_events() {
        // Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
        logger = test_utils::TestLogger::new();
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4445,11 +4445,11 @@ fn test_manager_serialize_deserialize_events() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: config,
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       chain_monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4460,7 +4460,7 @@ fn test_manager_serialize_deserialize_events() {
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
 
        // After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager
@@ -4507,9 +4507,9 @@ fn test_simple_manager_serialize_deserialize() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
@@ -4520,12 +4520,12 @@ fn test_simple_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4535,11 +4535,11 @@ fn test_simple_manager_serialize_deserialize() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: UserConfig::default(),
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       chain_monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4548,7 +4548,7 @@ fn test_simple_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4566,16 +4566,16 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
 
        let mut node_0_stale_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write_for_disk(&mut writer).unwrap();
                node_0_stale_monitors_serialized.push(writer.0);
@@ -4594,7 +4594,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Now serialize the ChannelMonitor (which is now out-of-sync with the ChannelManager for the channel w/
        // nodes[3])
        let mut node_0_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write_for_disk(&mut writer).unwrap();
                node_0_monitors_serialized.push(writer.0);
@@ -4602,8 +4602,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
 
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
@@ -4625,11 +4625,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        let mut nodes_0_read = &nodes_0_serialized[..];
        if let Err(msgs::DecodeError::InvalidValue) =
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
                keys_manager: &keys_manager,
                fee_estimator: &fee_estimator,
-               chain_monitor: nodes[0].chan_monitor,
+               chain_monitor: nodes[0].chain_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
                channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4639,11 +4639,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        let mut nodes_0_read = &nodes_0_serialized[..];
        let (_, nodes_0_deserialized_tmp) =
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
                keys_manager: &keys_manager,
                fee_estimator: &fee_estimator,
-               chain_monitor: nodes[0].chan_monitor,
+               chain_monitor: nodes[0].chain_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
                channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4659,7 +4659,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        for monitor in node_0_monitors.drain(..) {
-               assert!(nodes[0].chan_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
+               assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
@@ -4689,7 +4689,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 macro_rules! check_spendable_outputs {
        ($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => {
                {
-                       let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
+                       let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
                        let mut txn = Vec::new();
                        for event in events {
                                match event {
@@ -5748,8 +5748,8 @@ fn test_key_derivation_params() {
        // We manually create the node configuration to back up the seed.
        let seed = [42; 32];
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-       let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
-       let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chan_monitor, keys_manager, node_seed: seed };
+       let chain_monitor = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
+       let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager, node_seed: seed };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        node_cfgs.remove(0);
        node_cfgs.insert(0, node);
@@ -7206,7 +7206,7 @@ fn test_no_failure_dust_htlc_local_commitment() {
        };
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].chan_monitor.simple_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
+       nodes[0].chain_monitor.chain_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
        // We broadcast a few more blocks to check everything is all right
@@ -7483,8 +7483,8 @@ fn test_data_loss_protect() {
 
        // Cache node A state before any channel update
        let previous_node_state = nodes[0].node.encode();
-       let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+       let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chain_monitor_state).unwrap();
 
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
@@ -7494,16 +7494,16 @@ fn test_data_loss_protect() {
 
        // Restore node A from previous state
        logger = test_utils::TestLogger::with_id(format!("node {}", 0));
-       let mut chan_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0)).unwrap().1;
+       let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
        chain_source = test_utils::TestChainSource::new(Network::Testnet);
        tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
        keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
-       monitor = test_utils::TestChannelMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
+       monitor = test_utils::TestChainMonitor::new(&tx_broadcaster, &logger, &fee_estimator);
        node_state_0 = {
                let mut channel_monitors = HashMap::new();
-               channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chan_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+               channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
                        chain_monitor: &monitor,
@@ -7514,12 +7514,12 @@ fn test_data_loss_protect() {
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
-       assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
-       nodes[0].chan_monitor = &monitor;
+       assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+       nodes[0].chain_monitor = &monitor;
        nodes[0].chain_source = &chain_source;
 
        nodes[0].block_notifier = BlockNotifier::new();
-       nodes[0].block_notifier.register_listener(&nodes[0].chan_monitor.simple_monitor);
+       nodes[0].block_notifier.register_listener(&nodes[0].chain_monitor.chain_monitor);
        nodes[0].block_notifier.register_listener(nodes[0].node);
 
        check_added_monitors!(nodes[0], 1);
@@ -8232,7 +8232,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
        connect_blocks(&nodes[0], 5, 130,  false, header_130.block_hash());
        {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
                        assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
                        assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
@@ -8361,22 +8361,22 @@ fn test_update_err_monitor_lockdown() {
        // Route a HTLC from node 0 to node 1 (but don't settle)
        let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
 
-       // Copy SimpleManyChannelMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
+       // Copy ChainMonitor to simulate a watchtower and update the block height of node 0 until its ChannelMonitor times out the HTLC onchain
        let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let watchtower = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower.simple_monitor.block_connected(&header, &[], 200);
+       watchtower.chain_monitor.block_connected(&header, &[], 200);
 
        // Try to update ChannelMonitor
        assert!(nodes[1].node.claim_funds(preimage, &None, 9_000_000));
@@ -8386,8 +8386,8 @@ fn test_update_err_monitor_lockdown() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
-                       if let Err(_) =  watchtower.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
@@ -8418,22 +8418,22 @@ fn test_concurrent_monitor_claim() {
        // Route a HTLC from node 0 to node 1 (but don't settle)
        route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
 
-       // Copy SimpleManyChannelMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain
+       // Copy ChainMonitor to simulate watchtower Alice and update the block height so her ChannelMonitor times out the HTLC onchain
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
        let watchtower_alice = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower_alice.simple_monitor.block_connected(&header, &vec![], 135);
+       watchtower_alice.chain_monitor.block_connected(&header, &vec![], 135);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
        {
@@ -8442,22 +8442,22 @@ fn test_concurrent_monitor_claim() {
                txn.clear();
        }
 
-       // Copy SimpleManyChannelMonitor to simulate watchtower Bob and make it receive a commitment update first.
+       // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
        let watchtower_bob = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
                assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower_bob.simple_monitor.block_connected(&header, &vec![], 134);
+       watchtower_bob.chain_monitor.block_connected(&header, &vec![], 134);
 
        // Route another payment to generate another update with still previous HTLC pending
        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -8474,16 +8474,16 @@ fn test_concurrent_monitor_claim() {
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
-                       if let Err(_) =  watchtower_alice.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = watchtower_bob.simple_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chan_monitor.update_channel(outpoint, update) {} else { assert!(false); }
+                       if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
 
        //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-       watchtower_bob.simple_monitor.block_connected(&header, &vec![], 135);
+       watchtower_bob.chain_monitor.block_connected(&header, &vec![], 135);
 
        // Watchtower Bob should have broadcast a commitment/HTLC-timeout
        let bob_state_y;
@@ -8495,7 +8495,7 @@ fn test_concurrent_monitor_claim() {
        };
 
        // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
-       watchtower_alice.simple_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
+       watchtower_alice.chain_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
        {
                let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // We broadcast twice the transaction, once due to the HTLC-timeout, once due
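
The two watchtower tests above exercise the rename through the same pattern: serialize a live ChannelMonitor, read back an independent copy, and hand that copy to a standalone ChainMonitor which can then be driven with blocks separately from node 0. A minimal paraphrase of that pattern from the hunks above; `outpoint`, `header`, `logger` and the block height are the tests' own placeholders:

    let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
    let monitor = monitors.get(&outpoint).unwrap();
    let mut w = test_utils::TestVecWriter(Vec::new());
    monitor.write_for_disk(&mut w).unwrap();
    // Deserialize an independent copy and register it with a fresh ChainMonitor acting as the watchtower.
    let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
            &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
    let watchtower = test_utils::TestChainMonitor::new(&chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
    assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
    // Blocks connected here advance only the watchtower's copy, not node 0's own monitor.
    watchtower.chain_monitor.block_connected(&header, &[], 200);
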
index 295636e33a222555f8aa273ced12718dac2131e5..e505774cc47c6d987ce45ddbc9e4b3840225527a 100644 (file)
@@ -59,27 +59,27 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
        }
 }
 
-pub struct TestChannelMonitor<'a> {
+pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
        pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
-       pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger>,
+       pub chain_monitor: channelmonitor::ChainMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
        // If this is set to Some(), after the next return, we'll always return this until update_ret
        // is changed:
        pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
 }
-impl<'a> TestChannelMonitor<'a> {
+impl<'a> TestChainMonitor<'a> {
        pub fn new(broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        latest_monitor_update_id: Mutex::new(HashMap::new()),
-                       simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(broadcaster, logger, fee_estimator),
+                       chain_monitor: channelmonitor::ChainMonitor::new(broadcaster, logger, fee_estimator),
                        update_ret: Mutex::new(Ok(())),
                        next_update_ret: Mutex::new(None),
                }
        }
 }
-impl<'a> chain::Watch for TestChannelMonitor<'a> {
+impl<'a> chain::Watch for TestChainMonitor<'a> {
        type Keys = EnforcingChannelKeys;
 
        fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
@@ -92,7 +92,7 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
-               assert!(self.simple_monitor.watch_channel(funding_txo, new_monitor).is_ok());
+               assert!(self.chain_monitor.watch_channel(funding_txo, new_monitor).is_ok());
 
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
@@ -109,10 +109,10 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
                                &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
 
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
-               assert!(self.simple_monitor.update_channel(funding_txo, update).is_ok());
+               assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and to disk...
-               let monitors = self.simple_monitor.monitors.lock().unwrap();
+               let monitors = self.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&funding_txo).unwrap();
                w.0.clear();
                monitor.write_for_disk(&mut w).unwrap();
@@ -129,7 +129,7 @@ impl<'a> chain::Watch for TestChannelMonitor<'a> {
        }
 
        fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
-               return self.simple_monitor.release_pending_monitor_events();
+               return self.chain_monitor.release_pending_monitor_events();
        }
 }
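
With the rename in place, the delegation in the test harness reads ChannelManager -> chain::Watch (TestChainMonitor) -> channelmonitor::ChainMonitor. A short usage sketch of the renamed wrapper, assuming a broadcaster, logger and fee estimator like those above; `funding_txo`, `monitor` and `update` are placeholders:

    let chain_monitor = test_utils::TestChainMonitor::new(&broadcaster, &logger, &fee_estimator);
    // watch_channel round-trips the monitor through serialization, records it in added_monitors,
    // and registers it with the inner ChainMonitor.
    assert!(chain_monitor.watch_channel(funding_txo, monitor).is_ok());
    // update_channel forwards the update and re-checks that the stored monitor still serializes.
    assert!(chain_monitor.update_channel(funding_txo, update).is_ok());
    // Pending events surface from the inner ChainMonitor, as the tests read them above.
    let events = chain_monitor.chain_monitor.get_and_clear_pending_events();
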