Merge pull request #649 from jkczyz/2020-06-refactor-chain-listener
author     Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
           Thu, 1 Oct 2020 18:10:13 +0000 (11:10 -0700)
committer  GitHub <noreply@github.com>
           Thu, 1 Oct 2020 18:10:13 +0000 (11:10 -0700)
Refactor chain monitoring

27 files changed:
ARCH.md
fuzz/src/chanmon_consistency.rs
fuzz/src/chanmon_deser.rs
fuzz/src/full_stack.rs
fuzz/src/router.rs
lightning-net-tokio/src/lib.rs
lightning/src/chain/chaininterface.rs
lightning/src/chain/chainmonitor.rs [new file with mode: 0644]
lightning/src/chain/channelmonitor.rs [new file with mode: 0644]
lightning/src/chain/mod.rs
lightning/src/chain/transaction.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/channelmonitor.rs [deleted file]
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/onchaintx.rs
lightning/src/ln/onion_route_tests.rs
lightning/src/ln/reorg_tests.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/util/errors.rs
lightning/src/util/macro_logger.rs
lightning/src/util/test_utils.rs

diff --git a/ARCH.md b/ARCH.md
index c4f94280c4890c8a5064339ab8b553fb735f93b0..5b9304cce009f8d9b3eb1a7fcff3721fc3efb094 100644
--- a/ARCH.md
+++ b/ARCH.md
@@ -6,7 +6,7 @@ need to use are `ChannelManager` and `ChannelMonitor`. `ChannelManager` holds mu
 channels, routes payments between them, and exposes a simple API to make and receive
 payments. Individual `ChannelMonitor`s monitor the on-chain state of a channel, punish
 counterparties if they misbehave, and force-close channels if they contain unresolved
-HTLCs which are near expiration. The `ManyChannelMonitor` API provides a way for you to
+HTLCs which are near expiration. The `chain::Watch` interface provides a way for you to
 receive `ChannelMonitorUpdate`s from `ChannelManager` and persist them to disk before the
 channel steps forward.
 
@@ -37,26 +37,26 @@ At a high level, some of the common interfaces fit together as follows:
                      -----------------
                      | KeysInterface |  --------------
                      -----------------  | UserConfig |
-         --------------------       |   --------------
-  /------| MessageSendEvent |       |   |     ----------------
- |       --------------------       |   |     | FeeEstimator |
- |   (as MessageSendEventsProvider) |   |     ----------------
- |                         ^        |   |    /          |      ------------------------
- |                          \       |   |   /      ---------> | BroadcasterInterface |
- |                           \      |   |  /      /     |     ------------------------
- |                            \     v   v v      /      v        ^
- |    (as                      ------------------       ----------------------
- |    ChannelMessageHandler)-> | ChannelManager | ----> | ManyChannelMonitor |
- v               /             ------------------       ----------------------
---------------- /                ^         (as EventsProvider)   ^
-| PeerManager |-                 |              \     /         /
----------------                  |        -------\---/----------
- |              -----------------------  /        \ /
- |              | ChainWatchInterface | -          v
- |              -----------------------        ---------
- |                            |                | Event |
-(as RoutingMessageHandler)    v                ---------
-  \                   --------------------
-   -----------------> | NetGraphMsgHandler |
-                      --------------------
+         --------------------       ^   --------------
+   ------| MessageSendEvent |       |   ^     ----------------
+  /      --------------------       |   |     | FeeEstimator | <-----------------------
+ |   (as MessageSendEventsProvider) |   |     ----------------                         \
+ |                         ^        |   |    ^                ------------------------  |
+ |                          \       |   |   /      ---------> | BroadcasterInterface |  |
+ |                           \      |   |  /      /           ------------------------  |
+ |                            \     |   | /      /                          ^           |
+ |    (as                      ------------------       ----------------    |           |
+ |    ChannelMessageHandler)-> | ChannelManager | ----> | chain::Watch |    |           |
+ v               /             ------------------       ----------------    |           |
+--------------- /                  (as EventsProvider)         ^            |           |
+| PeerManager |-                             \                 |            |           |
+---------------                               \                | (is-a)     |           |
+ |                    -----------------        \       _----------------   /           /
+ |                    | chain::Access |         \     / | ChainMonitor |---------------
+ |                    -----------------          \   /  ----------------
+ |                            ^                   \ /          |
+(as RoutingMessageHandler)    |                    v           v
+  \                   ----------------------   ---------   -----------------
+   -----------------> | NetGraphMsgHandler |   | Event |   | chain::Filter |
+                      ----------------------   ---------   -----------------
 ```
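For readers mapping the old names onto the new ones, the following is a rough wiring sketch based on the updated diagram and on the constructions used in the fuzz targets and lightning-net-tokio docs further down in this PR. It uses `dyn` trait objects plus `KeysManager`/`InMemoryChannelKeys` as plausible stand-ins for user-supplied components; `PeerManager` setup and block feeding are elided.

```rust
use std::sync::Arc;

use bitcoin::network::constants::Network;

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::ChainMonitor;
use lightning::chain::keysinterface::{InMemoryChannelKeys, KeysManager};
use lightning::ln::channelmanager::ChannelManager;
use lightning::routing::network_graph::NetGraphMsgHandler;
use lightning::util::config::UserConfig;
use lightning::util::logger::Logger;

/// ChainMonitor parameterized as in the updated docs/fuzz targets: an optional
/// chain::Filter plus user-supplied broadcaster, fee estimator and logger.
type Monitor = ChainMonitor<InMemoryChannelKeys, Arc<dyn chain::Filter>,
    Arc<dyn BroadcasterInterface>, Arc<dyn FeeEstimator>, Arc<dyn Logger>>;

fn wire_up(
    broadcaster: Arc<dyn BroadcasterInterface>,
    fee_estimator: Arc<dyn FeeEstimator>,
    logger: Arc<dyn Logger>,
    keys_manager: Arc<KeysManager>,
) {
    // ChainMonitor is the provided chain::Watch implementation. `None` means no
    // chain::Filter, i.e. blocks are expected to arrive unfiltered.
    let chain_monitor: Arc<Monitor> = Arc::new(ChainMonitor::new(
        None, broadcaster.clone(), logger.clone(), fee_estimator.clone()));

    // ChannelManager now takes the chain::Watch where it previously took a
    // ManyChannelMonitor (the final argument is the current best block height).
    let channel_manager = Arc::new(ChannelManager::new(
        Network::Bitcoin, fee_estimator, chain_monitor.clone(), broadcaster,
        logger.clone(), keys_manager, UserConfig::default(), 0));

    // The router now takes an optional chain::Access for UTXO lookups instead
    // of a ChainWatchInterface.
    let net_graph_msg_handler =
        NetGraphMsgHandler::new(None::<Arc<dyn chain::Access>>, logger);

    // Hand these to a PeerManager / event loop as before.
    let _ = (channel_manager, net_graph_msg_handler);
}
```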
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index ca05e5db942ae4a10fcf7257a3e69f5bc939f374..1650e2e25f7060fa8c63c568257983784bcbe721 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -28,12 +28,13 @@ use bitcoin::hashes::Hash as TraitImport;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{BlockHash, WPubkeyHash};
 
-use lightning::chain::chaininterface;
+use lightning::chain;
+use lightning::chain::chainmonitor;
+use lightning::chain::channelmonitor;
+use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
 use lightning::chain::transaction::OutPoint;
-use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil,ChainWatchInterface};
+use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
-use lightning::ln::channelmonitor;
-use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
 use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs};
 use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
@@ -81,9 +82,9 @@ impl Writer for VecWriter {
        }
 }
 
-struct TestChannelMonitor {
+struct TestChainMonitor {
        pub logger: Arc<dyn Logger>,
-       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<dyn ChainWatchInterface>>>,
+       pub chain_monitor: Arc<chainmonitor::ChainMonitor<EnforcingChannelKeys, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
        // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
        // logic will automatically force-close our channels for us (as we don't have an up-to-date
@@ -93,10 +94,10 @@ struct TestChannelMonitor {
        pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
        pub should_update_manager: atomic::AtomicBool,
 }
-impl TestChannelMonitor {
-       pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
+impl TestChainMonitor {
+       pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>) -> Self {
                Self {
-                       simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)),
+                       chain_monitor: Arc::new(chainmonitor::ChainMonitor::new(None, broadcaster, logger.clone(), feeest)),
                        logger,
                        update_ret: Mutex::new(Ok(())),
                        latest_monitors: Mutex::new(HashMap::new()),
@@ -104,21 +105,21 @@ impl TestChannelMonitor {
                }
        }
 }
-impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
+impl chain::Watch for TestChainMonitor {
        type Keys = EnforcingChannelKeys;
 
-       fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                let mut ser = VecWriter(Vec::new());
                monitor.write_for_disk(&mut ser).unwrap();
                if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
-                       panic!("Already had monitor pre-add_monitor");
+                       panic!("Already had monitor pre-watch_channel");
                }
                self.should_update_manager.store(true, atomic::Ordering::Relaxed);
-               assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
+               assert!(self.chain_monitor.watch_channel(funding_txo, monitor).is_ok());
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                let mut map_lock = self.latest_monitors.lock().unwrap();
                let mut map_entry = match map_lock.entry(funding_txo) {
                        hash_map::Entry::Occupied(entry) => entry,
@@ -134,8 +135,8 @@ impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
-               return self.simple_monitor.get_and_clear_pending_monitor_events();
+       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+               return self.chain_monitor.release_pending_monitor_events();
        }
 }
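TestChainMonitor above is essentially the shape a persisting `chain::Watch` takes outside the fuzz harness: intercept what `chain::Watch` hands you, persist it (per ARCH.md, before the channel steps forward), then delegate to an inner `ChainMonitor`. A minimal sketch along those lines; the `persist` hook is hypothetical and its serialization (e.g. via `write_for_disk`, as in the harness above) is elided.

```rust
use std::sync::Arc;

use lightning::chain::{self, Watch};
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::ChainMonitor;
use lightning::chain::channelmonitor::{
    ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent};
use lightning::chain::keysinterface::InMemoryChannelKeys;
use lightning::chain::transaction::OutPoint;
use lightning::util::logger::Logger;

type Monitor = ChainMonitor<InMemoryChannelKeys, Arc<dyn chain::Filter>,
    Arc<dyn BroadcasterInterface>, Arc<dyn FeeEstimator>, Arc<dyn Logger>>;

/// Persists monitor state before handing it to the in-memory ChainMonitor.
struct PersistingWatch {
    inner: Arc<Monitor>,
}

impl PersistingWatch {
    // Hypothetical hook: serialize the monitor/update for `funding_txo` (e.g.
    // with ChannelMonitor::write_for_disk) and write it to durable storage,
    // returning TemporaryFailure or PermanentFailure if that fails.
    fn persist(&self, _funding_txo: OutPoint) -> Result<(), ChannelMonitorUpdateErr> {
        Ok(())
    }
}

impl chain::Watch for PersistingWatch {
    type Keys = InMemoryChannelKeys;

    fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<InMemoryChannelKeys>) -> Result<(), ChannelMonitorUpdateErr> {
        self.persist(funding_txo)?;
        self.inner.watch_channel(funding_txo, monitor)
    }

    fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
        self.persist(funding_txo)?;
        self.inner.update_channel(funding_txo, update)
    }

    fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
        self.inner.release_pending_monitor_events()
    }
}
```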
 
@@ -191,8 +192,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        macro_rules! make_node {
                ($node_id: expr) => { {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-                       let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
-                       let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
+                       let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
 
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
@@ -207,8 +207,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        macro_rules! reload_node {
                ($ser: expr, $node_id: expr, $old_monitors: expr) => { {
                        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
-                       let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
-                       let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
+                       let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone()));
 
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
@@ -220,7 +219,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                        let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
                        for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
                                monitors.insert(outpoint, <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser)).expect("Failed to read monitor").1);
-                               monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
+                               chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
                        }
                        let mut monitor_refs = HashMap::new();
                        for (outpoint, monitor) in monitors.iter_mut() {
@@ -230,14 +229,14 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
                        let read_args = ChannelManagerReadArgs {
                                keys_manager,
                                fee_estimator: fee_est.clone(),
-                               monitor: monitor.clone(),
+                               chain_monitor: chain_monitor.clone(),
                                tx_broadcaster: broadcast.clone(),
                                logger,
                                default_config: config,
                                channel_monitors: monitor_refs,
                        };
 
-                       (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)
+                       (<(BlockHash, ChannelManager<EnforcingChannelKeys, Arc<TestChainMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor)
                } }
        }
 
@@ -308,16 +307,11 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        macro_rules! confirm_txn {
                ($node: expr) => { {
                        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       let mut txn = Vec::with_capacity(channel_txn.len());
-                       let mut posn = Vec::with_capacity(channel_txn.len());
-                       for i in 0..channel_txn.len() {
-                               txn.push(&channel_txn[i]);
-                               posn.push(i + 1);
-                       }
-                       $node.block_connected(&header, 1, &txn, &posn);
+                       let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
+                       $node.block_connected(&header, &txdata, 1);
                        for i in 2..100 {
                                header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                               $node.block_connected(&header, i, &Vec::new(), &[0; 0]);
+                               $node.block_connected(&header, &[], i);
                        }
                } }
        }
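The `block_connected` calls above illustrate the new signature: instead of parallel `txn_matched`/`indexes_of_txn_matched` slices, transactions are passed as `(index_in_block, &Transaction)` pairs followed by the height. A small sketch of producing that shape from a full block (the `i + 1` indices in the fuzz targets account for an omitted coinbase):

```rust
use bitcoin::blockdata::block::Block;
use bitcoin::blockdata::transaction::Transaction;

/// Pair each transaction in a full block with its index within the block
/// (the coinbase sits at index 0).
fn block_to_txdata(block: &Block) -> Vec<(usize, &Transaction)> {
    block.txdata.iter().enumerate().collect()
}

// For each consumer of chain data (ChannelManager, ChainMonitor, ...):
//     let txdata = block_to_txdata(&block);
//     node.block_connected(&block.header, &txdata, height);
// and on reorgs (note the asymmetry introduced in full_stack.rs below):
//     channel_manager.block_disconnected(&header);       // height argument dropped
//     chain_monitor.block_disconnected(&header, height); // still takes the height
```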
diff --git a/fuzz/src/chanmon_deser.rs b/fuzz/src/chanmon_deser.rs
index 3f4ff5ad0a131789cf12fc00ef0c1f560d067f73..5a76340ff309447c9cbae3baa0b112353b027210 100644
--- a/fuzz/src/chanmon_deser.rs
+++ b/fuzz/src/chanmon_deser.rs
@@ -3,8 +3,8 @@
 
 use bitcoin::hash_types::BlockHash;
 
+use lightning::chain::channelmonitor;
 use lightning::util::enforcing_trait_impls::EnforcingChannelKeys;
-use lightning::ln::channelmonitor;
 use lightning::util::ser::{Readable, Writer};
 
 use utils::test_logger;
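Since `ChannelMonitor` now lives under `lightning::chain`, reloading one from disk looks like the `reload_node` path in the consistency target above. A hedged sketch, assuming the monitor was written with `write_for_disk` and uses `InMemoryChannelKeys`:

```rust
use std::io::Cursor;

use bitcoin::hash_types::BlockHash;

use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::chain::keysinterface::InMemoryChannelKeys;
use lightning::util::ser::Readable;

/// Deserialize a (BlockHash, ChannelMonitor) pair previously serialized with
/// ChannelMonitor::write_for_disk, returning None on any decode error.
fn read_monitor(bytes: &[u8]) -> Option<(BlockHash, ChannelMonitor<InMemoryChannelKeys>)> {
    <(BlockHash, ChannelMonitor<InMemoryChannelKeys>)>::read(&mut Cursor::new(bytes)).ok()
}
```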
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 42da4d90312eaca635507eb58c9996977c92a4d5..1ed17b9ea3ff0831e4767854cb1f4c54a1ae168f 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -25,10 +25,11 @@ use bitcoin::hashes::HashEngine as TraitImportEngine;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
 
-use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
+use lightning::chain;
+use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use lightning::chain::chainmonitor;
 use lightning::chain::transaction::OutPoint;
 use lightning::chain::keysinterface::{InMemoryChannelKeys, KeysInterface};
-use lightning::ln::channelmonitor;
 use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor};
 use lightning::routing::router::get_route;
@@ -144,14 +145,13 @@ impl<'a> std::hash::Hash for Peer<'a> {
 
 type ChannelMan = ChannelManager<
        EnforcingChannelKeys,
-       Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<ChainWatchInterfaceUtil>>>,
+       Arc<chainmonitor::ChainMonitor<EnforcingChannelKeys, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>, Arc<dyn Logger>>;
-type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<NetGraphMsgHandler<Arc<ChainWatchInterfaceUtil>, Arc<dyn Logger>>>, Arc<dyn Logger>>;
+type PeerMan<'a> = PeerManager<Peer<'a>, Arc<ChannelMan>, Arc<NetGraphMsgHandler<Arc<dyn chain::Access>, Arc<dyn Logger>>>, Arc<dyn Logger>>;
 
 struct MoneyLossDetector<'a> {
        manager: Arc<ChannelMan>,
-       monitor: Arc<channelmonitor::SimpleManyChannelMonitor<
-               OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<ChainWatchInterfaceUtil>>>,
+       monitor: Arc<chainmonitor::ChainMonitor<EnforcingChannelKeys, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
        handler: PeerMan<'a>,
 
        peers: &'a RefCell<[bool; 256]>,
@@ -165,7 +165,7 @@ struct MoneyLossDetector<'a> {
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
                   manager: Arc<ChannelMan>,
-                  monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>, Arc<ChainWatchInterfaceUtil>>>,
+                  monitor: Arc<chainmonitor::ChainMonitor<EnforcingChannelKeys, Arc<dyn chain::Filter>, Arc<TestBroadcaster>, Arc<FuzzEstimator>, Arc<dyn Logger>>>,
                   handler: PeerMan<'a>) -> Self {
                MoneyLossDetector {
                        manager,
@@ -183,15 +183,13 @@ impl<'a> MoneyLossDetector<'a> {
        }
 
        fn connect_block(&mut self, all_txn: &[Transaction]) {
-               let mut txn = Vec::with_capacity(all_txn.len());
-               let mut txn_idxs = Vec::with_capacity(all_txn.len());
+               let mut txdata = Vec::with_capacity(all_txn.len());
                for (idx, tx) in all_txn.iter().enumerate() {
                        let txid = tx.txid();
                        match self.txids_confirmed.entry(txid) {
                                hash_map::Entry::Vacant(e) => {
                                        e.insert(self.height);
-                                       txn.push(tx);
-                                       txn_idxs.push(idx + 1);
+                                       txdata.push((idx + 1, tx));
                                },
                                _ => {},
                        }
@@ -200,8 +198,8 @@ impl<'a> MoneyLossDetector<'a> {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
                self.height += 1;
                self.blocks_connected += 1;
-               self.manager.block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
-               (*self.monitor).block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
+               self.manager.block_connected(&header, &txdata, self.height as u32);
+               (*self.monitor).block_connected(&header, &txdata, self.height as u32);
                if self.header_hashes.len() > self.height {
                        self.header_hashes[self.height] = header.block_hash();
                } else {
@@ -214,7 +212,7 @@ impl<'a> MoneyLossDetector<'a> {
        fn disconnect_block(&mut self) {
                if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
                        let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       self.manager.block_disconnected(&header, self.height as u32);
+                       self.manager.block_disconnected(&header);
                        self.monitor.block_disconnected(&header, self.height as u32);
                        self.height -= 1;
                        let removal_height = self.height;
@@ -334,9 +332,8 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                Err(_) => return,
        };
 
-       let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
        let broadcast = Arc::new(TestBroadcaster{});
-       let monitor = Arc::new(channelmonitor::SimpleManyChannelMonitor::new(watch.clone(), broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
+       let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone()));
 
        let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
        let mut config = UserConfig::default();
@@ -345,7 +342,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
        let channelmanager = Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0));
        let our_id = PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret());
-       let net_graph_msg_handler = Arc::new(NetGraphMsgHandler::new(watch.clone(), Arc::clone(&logger)));
+       let net_graph_msg_handler = Arc::new(NetGraphMsgHandler::new(None, Arc::clone(&logger)));
 
        let peers = RefCell::new([false; 256]);
        let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler {
@@ -903,6 +900,6 @@ mod tests {
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 8
                assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9
-               assert_eq!(log_entries.get(&("lightning::ln::channelmonitor".to_string(), "Input spending counterparty commitment tx (00000000000000000000000000000000000000000000000000000000000000a1:0) in 0000000000000000000000000000000000000000000000000000000000000018 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10
+               assert_eq!(log_entries.get(&("lightning::chain::channelmonitor".to_string(), "Input spending counterparty commitment tx (00000000000000000000000000000000000000000000000000000000000000a1:0) in 0000000000000000000000000000000000000000000000000000000000000018 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10
        }
 }
diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs
index ffc4608376b71f9064bc4ad389100dcf326dc2b3..5f2114b64e169eaa875b18afe6a41f3b3d566de2 100644
--- a/fuzz/src/router.rs
+++ b/fuzz/src/router.rs
@@ -7,11 +7,11 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-use bitcoin::blockdata::script::{Script, Builder};
-use bitcoin::blockdata::block::Block;
-use bitcoin::hash_types::{Txid, BlockHash};
+use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::transaction::TxOut;
+use bitcoin::hash_types::BlockHash;
 
-use lightning::chain::chaininterface::{ChainError,ChainWatchInterface};
+use lightning::chain;
 use lightning::ln::channelmanager::ChannelDetails;
 use lightning::ln::features::InitFeatures;
 use lightning::ln::msgs;
@@ -76,26 +76,16 @@ impl InputData {
        }
 }
 
-struct DummyChainWatcher {
+struct FuzzChainSource {
        input: Arc<InputData>,
 }
-
-impl ChainWatchInterface for DummyChainWatcher {
-       fn install_watch_tx(&self, _txid: &Txid, _script_pub_key: &Script) { }
-       fn install_watch_outpoint(&self, _outpoint: (Txid, u32), _out_script: &Script) { }
-       fn watch_all_txn(&self) { }
-       fn filter_block(&self, _block: &Block) -> Vec<usize> {
-               Vec::new()
-       }
-       fn reentered(&self) -> usize { 0 }
-
-       fn get_chain_utxo(&self, _genesis_hash: BlockHash, _unspent_tx_output_identifier: u64) -> Result<(Script, u64), ChainError> {
+impl chain::Access for FuzzChainSource {
+       fn get_utxo(&self, _genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
                match self.input.get_slice(2) {
-                       Some(&[0, _]) => Err(ChainError::NotSupported),
-                       Some(&[1, _]) => Err(ChainError::NotWatched),
-                       Some(&[2, _]) => Err(ChainError::UnknownTx),
-                       Some(&[_, x]) => Ok((Builder::new().push_int(x as i64).into_script().to_v0_p2wsh(), 0)),
-                       None => Err(ChainError::UnknownTx),
+                       Some(&[0, _]) => Err(chain::AccessError::UnknownChain),
+                       Some(&[1, _]) => Err(chain::AccessError::UnknownTx),
+                       Some(&[_, x]) => Ok(TxOut { value: 0, script_pubkey: Builder::new().push_int(x as i64).into_script().to_v0_p2wsh() }),
+                       None => Err(chain::AccessError::UnknownTx),
                        _ => unreachable!(),
                }
        }
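`chain::Access::get_utxo` replaces `ChainWatchInterface::get_chain_utxo`: given the chain's genesis hash and a short channel id (block height in the top three bytes, transaction index in the next three, output index in the last two, per the docs on the old method), it returns the funding `TxOut`. A hedged sketch against a hypothetical local UTXO index; `UtxoIndex` is not part of the library:

```rust
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;

use lightning::chain;

/// Hypothetical backing store mapping (height, tx index, vout) to an unspent output.
trait UtxoIndex: Send + Sync {
    fn lookup(&self, height: u32, tx_index: u32, vout: u16) -> Option<TxOut>;
}

struct UtxoSource<S: UtxoIndex> {
    network: Network,
    index: S,
}

impl<S: UtxoIndex> chain::Access for UtxoSource<S> {
    fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
        if *genesis_hash != genesis_block(self.network).header.block_hash() {
            return Err(chain::AccessError::UnknownChain);
        }
        // Unpack the short_channel_id: 3 bytes height | 3 bytes tx index | 2 bytes vout.
        let height = (short_channel_id >> 40) as u32;
        let tx_index = ((short_channel_id >> 16) & 0xff_ffff) as u32;
        let vout = (short_channel_id & 0xffff) as u16;
        self.index.lookup(height, tx_index, vout).ok_or(chain::AccessError::UnknownTx)
    }
}
```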
@@ -160,12 +150,16 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
        }
 
        let logger: Arc<dyn Logger> = Arc::new(test_logger::TestLogger::new("".to_owned(), out));
-       let chain_monitor = Arc::new(DummyChainWatcher {
-               input: Arc::clone(&input),
-       });
+       let chain_source = if get_slice!(1)[0] % 2 == 0 {
+               None
+       } else {
+               Some(Arc::new(FuzzChainSource {
+                       input: Arc::clone(&input),
+               }))
+       };
 
        let our_pubkey = get_pubkey!();
-       let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor, Arc::clone(&logger));
+       let net_graph_msg_handler = NetGraphMsgHandler::new(chain_source, Arc::clone(&logger));
 
        loop {
                match get_slice!(1)[0] {
diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs
index a7818d7b85c4bd38bd1ba692479ebbda7e90f460..e84ee76229fecb2a3090c72ed1865102f119eb74 100644
--- a/lightning-net-tokio/src/lib.rs
+++ b/lightning-net-tokio/src/lib.rs
 //! type TxBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface;
 //! type FeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator;
 //! type Logger = dyn lightning::util::logger::Logger;
-//! type ChainWatchInterface = dyn lightning::chain::chaininterface::ChainWatchInterface;
-//! type ChannelMonitor = lightning::ln::channelmonitor::SimpleManyChannelMonitor<lightning::chain::transaction::OutPoint, lightning::chain::keysinterface::InMemoryChannelKeys, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<ChainWatchInterface>>;
-//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChannelMonitor, TxBroadcaster, FeeEstimator, Logger>;
-//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChannelMonitor, TxBroadcaster, FeeEstimator, ChainWatchInterface, Logger>;
+//! type ChainAccess = dyn lightning::chain::Access;
+//! type ChainFilter = dyn lightning::chain::Filter;
+//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemoryChannelKeys, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>>;
+//! type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>;
+//! type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>;
 //!
 //! // Connect to node with pubkey their_node_id at addr:
-//! async fn connect_to_node(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
+//! async fn connect_to_node(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, their_node_id: PublicKey, addr: SocketAddr) {
 //!     let (sender, mut receiver) = mpsc::channel(2);
 //!     lightning_net_tokio::connect_outbound(peer_manager, sender, their_node_id, addr).await;
 //!     loop {
 //!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//!         for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
 //!     }
 //! }
 //!
 //! // Begin reading from a newly accepted socket and talk to the peer:
-//! async fn accept_socket(peer_manager: PeerManager, channel_monitor: Arc<ChannelMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
+//! async fn accept_socket(peer_manager: PeerManager, chain_monitor: Arc<ChainMonitor>, channel_manager: ChannelManager, socket: TcpStream) {
 //!     let (sender, mut receiver) = mpsc::channel(2);
 //!     lightning_net_tokio::setup_inbound(peer_manager, sender, socket);
 //!     loop {
@@ -63,7 +64,7 @@
 //!         for _event in channel_manager.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
-//!         for _event in channel_monitor.get_and_clear_pending_events().drain(..) {
+//!         for _event in chain_monitor.get_and_clear_pending_events().drain(..) {
 //!             // Handle the event!
 //!         }
 //!     }
diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs
index f0a4b648ed50c1c888740d862aeb94c597f8d4c8..91e604838ecbe0b4c6867184bf3146f6a18dfb65 100644
--- a/lightning/src/chain/chaininterface.rs
+++ b/lightning/src/chain/chaininterface.rs
 //! Includes traits for monitoring and receiving notifications of new blocks and block
 //! disconnections, transaction broadcasting, and feerate information requests.
 
-use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::constants::genesis_block;
-use bitcoin::network::constants::Network;
-use bitcoin::hash_types::{Txid, BlockHash};
-
-use std::sync::{Mutex, MutexGuard, Arc};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::collections::HashSet;
-use std::ops::Deref;
-use std::marker::PhantomData;
-use std::ptr;
-
-/// Used to give chain error details upstream
-#[derive(Clone)]
-pub enum ChainError {
-       /// Client doesn't support UTXO lookup (but the chain hash matches our genesis block hash)
-       NotSupported,
-       /// Chain isn't the one watched
-       NotWatched,
-       /// Tx doesn't exist or is unconfirmed
-       UnknownTx,
-}
-
-/// An interface to request notification of certain scripts as they appear the
-/// chain.
-///
-/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
-/// called from inside the library in response to ChainListener events, P2P events, or timer
-/// events).
-pub trait ChainWatchInterface: Sync + Send {
-       /// Provides a txid/random-scriptPubKey-in-the-tx which much be watched for.
-       fn install_watch_tx(&self, txid: &Txid, script_pub_key: &Script);
-
-       /// Provides an outpoint which must be watched for, providing any transactions which spend the
-       /// given outpoint.
-       fn install_watch_outpoint(&self, outpoint: (Txid, u32), out_script: &Script);
-
-       /// Indicates that a listener needs to see all transactions.
-       fn watch_all_txn(&self);
-
-       /// Gets the script and value in satoshis for a given unspent transaction output given a
-       /// short_channel_id (aka unspent_tx_output_identier). For BTC/tBTC channels the top three
-       /// bytes are the block height, the next 3 the transaction index within the block, and the
-       /// final two the output within the transaction.
-       fn get_chain_utxo(&self, genesis_hash: BlockHash, unspent_tx_output_identifier: u64) -> Result<(Script, u64), ChainError>;
-
-       /// Gets the list of transaction indices within a given block that the ChainWatchInterface is
-       /// watching for.
-       fn filter_block(&self, block: &Block) -> Vec<usize>;
-
-       /// Returns a usize that changes when the ChainWatchInterface's watched data is modified.
-       /// Users of `filter_block` should pre-save a copy of `reentered`'s return value and use it to
-       /// determine whether they need to re-filter a given block.
-       fn reentered(&self) -> usize;
-}
 
 /// An interface to send a transaction to the Bitcoin network.
 pub trait BroadcasterInterface: Sync + Send {
@@ -77,30 +21,6 @@ pub trait BroadcasterInterface: Sync + Send {
        fn broadcast_transaction(&self, tx: &Transaction);
 }
 
-/// A trait indicating a desire to listen for events from the chain
-pub trait ChainListener: Sync + Send {
-       /// Notifies a listener that a block was connected.
-       ///
-       /// The txn_matched array should be set to references to transactions which matched the
-       /// relevant installed watch outpoints/txn, or the full set of transactions in the block.
-       ///
-       /// Note that if txn_matched includes only matched transactions, and a new
-       /// transaction/outpoint is watched during a block_connected call, the block *must* be
-       /// re-scanned with the new transaction/outpoints and block_connected should be called
-       /// again with the same header and (at least) the new transactions.
-       ///
-       /// Note that if non-new transaction/outpoints are be registered during a call, a second call
-       /// *must not* happen.
-       ///
-       /// This also means those counting confirmations using block_connected callbacks should watch
-       /// for duplicate headers and not count them towards confirmations!
-       fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[usize]);
-       /// Notifies a listener that a block was disconnected.
-       /// Unlike block_connected, this *must* never be called twice for the same disconnect event.
-       /// Height must be the one of the block which was disconnected (not new height of the best chain)
-       fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32);
-}
-
 /// An enum that represents the speed at which we want a transaction to confirm used for feerate
 /// estimation.
 pub enum ConfirmationTarget {
@@ -116,8 +36,7 @@ pub enum ConfirmationTarget {
 /// horizons.
 ///
 /// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
-/// called from inside the library in response to ChainListener events, P2P events, or timer
-/// events).
+/// called from inside the library in response to chain events, P2P events, or timer events).
 pub trait FeeEstimator: Sync + Send {
        /// Gets estimated satoshis of fee required per 1000 Weight-Units.
        ///
@@ -132,373 +51,3 @@ pub trait FeeEstimator: Sync + Send {
 
 /// Minimum relay fee as required by bitcoin network mempool policy.
 pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000;
-
-/// Utility for tracking registered txn/outpoints and checking for matches
-#[cfg_attr(test, derive(PartialEq))]
-pub struct ChainWatchedUtil {
-       watch_all: bool,
-
-       // We are more conservative in matching during testing to ensure everything matches *exactly*,
-       // even though during normal runtime we take more optimized match approaches...
-       #[cfg(test)]
-       watched_txn: HashSet<(Txid, Script)>,
-       #[cfg(not(test))]
-       watched_txn: HashSet<Script>,
-
-       watched_outpoints: HashSet<(Txid, u32)>,
-}
-
-impl ChainWatchedUtil {
-       /// Constructs an empty (watches nothing) ChainWatchedUtil
-       pub fn new() -> Self {
-               Self {
-                       watch_all: false,
-                       watched_txn: HashSet::new(),
-                       watched_outpoints: HashSet::new(),
-               }
-       }
-
-       /// Registers a tx for monitoring, returning true if it was a new tx and false if we'd already
-       /// been watching for it.
-       pub fn register_tx(&mut self, txid: &Txid, script_pub_key: &Script) -> bool {
-               if self.watch_all { return false; }
-               #[cfg(test)]
-               {
-                       self.watched_txn.insert((txid.clone(), script_pub_key.clone()))
-               }
-               #[cfg(not(test))]
-               {
-                       let _tx_unused = txid; // It's used in cfg(test), though
-                       self.watched_txn.insert(script_pub_key.clone())
-               }
-       }
-
-       /// Registers an outpoint for monitoring, returning true if it was a new outpoint and false if
-       /// we'd already been watching for it
-       pub fn register_outpoint(&mut self, outpoint: (Txid, u32), _script_pub_key: &Script) -> bool {
-               if self.watch_all { return false; }
-               self.watched_outpoints.insert(outpoint)
-       }
-
-       /// Sets us to match all transactions, returning true if this is a new setting and false if
-       /// we'd already been set to match everything.
-       pub fn watch_all(&mut self) -> bool {
-               if self.watch_all { return false; }
-               self.watch_all = true;
-               true
-       }
-
-       /// Checks if a given transaction matches the current filter.
-       pub fn does_match_tx(&self, tx: &Transaction) -> bool {
-               if self.watch_all {
-                       return true;
-               }
-               for out in tx.output.iter() {
-                       #[cfg(test)]
-                       for &(ref txid, ref script) in self.watched_txn.iter() {
-                               if *script == out.script_pubkey {
-                                       if tx.txid() == *txid {
-                                               return true;
-                                       }
-                               }
-                       }
-                       #[cfg(not(test))]
-                       for script in self.watched_txn.iter() {
-                               if *script == out.script_pubkey {
-                                       return true;
-                               }
-                       }
-               }
-               for input in tx.input.iter() {
-                       for outpoint in self.watched_outpoints.iter() {
-                               let &(outpoint_hash, outpoint_index) = outpoint;
-                               if outpoint_hash == input.previous_output.txid && outpoint_index == input.previous_output.vout {
-                                       return true;
-                               }
-                       }
-               }
-               false
-       }
-}
-
-/// BlockNotifierArc is useful when you need a BlockNotifier that points to ChainListeners with
-/// static lifetimes, e.g. when you're using lightning-net-tokio (since tokio::spawn requires
-/// parameters with static lifetimes). Other times you can afford a reference, which is more
-/// efficient, in which case BlockNotifierRef is a more appropriate type. Defining these type
-/// aliases prevents issues such as overly long function definitions.
-///
-/// (C-not exported) as we let clients handle any reference counting they need to do
-pub type BlockNotifierArc<C> = Arc<BlockNotifier<'static, Arc<ChainListener>, C>>;
-
-/// BlockNotifierRef is useful when you want a BlockNotifier that points to ChainListeners
-/// with nonstatic lifetimes. This is useful for when static lifetimes are not needed. Nonstatic
-/// lifetimes are more efficient but less flexible, and should be used by default unless static
-/// lifetimes are required, e.g. when you're using lightning-net-tokio (since tokio::spawn
-/// requires parameters with static lifetimes), in which case BlockNotifierArc is a more
-/// appropriate type. Defining these type aliases for common usages prevents issues such as
-/// overly long function definitions.
-pub type BlockNotifierRef<'a, C> = BlockNotifier<'a, &'a ChainListener, C>;
-
-/// Utility for notifying listeners about new blocks, and handling block rescans if new watch
-/// data is registered.
-///
-/// Rather than using a plain BlockNotifier, it is preferable to use either a BlockNotifierArc
-/// or a BlockNotifierRef for conciseness. See their documentation for more details, but essentially
-/// you should default to using a BlockNotifierRef, and use a BlockNotifierArc instead when you
-/// require ChainListeners with static lifetimes, such as when you're using lightning-net-tokio.
-pub struct BlockNotifier<'a, CL: Deref + 'a, C: Deref>
-               where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface {
-       listeners: Mutex<Vec<CL>>,
-       chain_monitor: C,
-       phantom: PhantomData<&'a ()>,
-}
-
-impl<'a, CL: Deref + 'a, C: Deref> BlockNotifier<'a, CL, C>
-               where CL::Target: ChainListener + 'a, C::Target: ChainWatchInterface {
-       /// Constructs a new BlockNotifier without any listeners.
-       pub fn new(chain_monitor: C) -> BlockNotifier<'a, CL, C> {
-               BlockNotifier {
-                       listeners: Mutex::new(Vec::new()),
-                       chain_monitor,
-                       phantom: PhantomData,
-               }
-       }
-
-       /// Register the given listener to receive events.
-       pub fn register_listener(&self, listener: CL) {
-               let mut vec = self.listeners.lock().unwrap();
-               vec.push(listener);
-       }
-       /// Unregister the given listener to no longer
-       /// receive events.
-       ///
-       /// If the same listener is registered multiple times, unregistering
-       /// will remove ALL occurrences of that listener. Comparison is done using
-       /// the pointer returned by the Deref trait implementation.
-       ///
-       /// (C-not exported) because the equality check would always fail
-       pub fn unregister_listener(&self, listener: CL) {
-               let mut vec = self.listeners.lock().unwrap();
-               // item is a ref to an abstract thing that dereferences to a ChainListener,
-               // so dereference it twice to get the ChainListener itself
-               vec.retain(|item | !ptr::eq(&(**item), &(*listener)));
-       }
-
-       /// Notify listeners that a block was connected given a full, unfiltered block.
-       ///
-       /// Handles re-scanning the block and calling block_connected again if listeners register new
-       /// watch data during the callbacks for you (see ChainListener::block_connected for more info).
-       pub fn block_connected(&self, block: &Block, height: u32) {
-               let mut reentered = true;
-               while reentered {
-                       let matched_indexes = self.chain_monitor.filter_block(block);
-                       let mut matched_txn = Vec::new();
-                       for index in matched_indexes.iter() {
-                               matched_txn.push(&block.txdata[*index]);
-                       }
-                       reentered = self.block_connected_checked(&block.header, height, matched_txn.as_slice(), matched_indexes.as_slice());
-               }
-       }
-
-       /// Notify listeners that a block was connected, given pre-filtered list of transactions in the
-       /// block which matched the filter (probably using does_match_tx).
-       ///
-       /// Returns true if notified listeners registered additional watch data (implying that the
-       /// block must be re-scanned and this function called again prior to further block_connected
-       /// calls, see ChainListener::block_connected for more info).
-       pub fn block_connected_checked(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[usize]) -> bool {
-               let last_seen = self.chain_monitor.reentered();
-
-               let listeners = self.listeners.lock().unwrap();
-               for listener in listeners.iter() {
-                       listener.block_connected(header, height, txn_matched, indexes_of_txn_matched);
-               }
-               return last_seen != self.chain_monitor.reentered();
-       }
-
-       /// Notify listeners that a block was disconnected.
-       pub fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
-               let listeners = self.listeners.lock().unwrap();
-               for listener in listeners.iter() {
-                       listener.block_disconnected(&header, disconnected_height);
-               }
-       }
-}
-
-/// Utility to capture some common parts of ChainWatchInterface implementors.
-///
-/// Keeping a local copy of this in a ChainWatchInterface implementor is likely useful.
-pub struct ChainWatchInterfaceUtil {
-       network: Network,
-       watched: Mutex<ChainWatchedUtil>,
-       reentered: AtomicUsize,
-}
-
-// We only expose PartialEq in test since its somewhat unclear exactly what it should do and we're
-// only comparing a subset of fields (essentially just checking that the set of things we're
-// watching is the same).
-#[cfg(test)]
-impl PartialEq for ChainWatchInterfaceUtil {
-       fn eq(&self, o: &Self) -> bool {
-               self.network == o.network &&
-               *self.watched.lock().unwrap() == *o.watched.lock().unwrap()
-       }
-}
-
-/// Register listener
-impl ChainWatchInterface for ChainWatchInterfaceUtil {
-       fn install_watch_tx(&self, txid: &Txid, script_pub_key: &Script) {
-               let mut watched = self.watched.lock().unwrap();
-               if watched.register_tx(txid, script_pub_key) {
-                       self.reentered.fetch_add(1, Ordering::Relaxed);
-               }
-       }
-
-       fn install_watch_outpoint(&self, outpoint: (Txid, u32), out_script: &Script) {
-               let mut watched = self.watched.lock().unwrap();
-               if watched.register_outpoint(outpoint, out_script) {
-                       self.reentered.fetch_add(1, Ordering::Relaxed);
-               }
-       }
-
-       fn watch_all_txn(&self) {
-               let mut watched = self.watched.lock().unwrap();
-               if watched.watch_all() {
-                       self.reentered.fetch_add(1, Ordering::Relaxed);
-               }
-       }
-
-       fn get_chain_utxo(&self, genesis_hash: BlockHash, _unspent_tx_output_identifier: u64) -> Result<(Script, u64), ChainError> {
-               if genesis_hash != genesis_block(self.network).header.block_hash() {
-                       return Err(ChainError::NotWatched);
-               }
-               Err(ChainError::NotSupported)
-       }
-
-       fn filter_block(&self, block: &Block) -> Vec<usize> {
-               let mut matched_index = Vec::new();
-               let mut matched_txids = HashSet::new();
-               {
-                       let watched = self.watched.lock().unwrap();
-                       for (index, transaction) in block.txdata.iter().enumerate() {
-                               // A tx matches the filter if it either matches the filter directly (via
-                               // does_match_tx_unguarded) or if it is a descendant of another matched
-                               // transaction within the same block, which we check for in the loop.
-                               let mut matched = self.does_match_tx_unguarded(transaction, &watched);
-                               for input in transaction.input.iter() {
-                                       if matched || matched_txids.contains(&input.previous_output.txid) {
-                                               matched = true;
-                                               break;
-                                       }
-                               }
-                               if matched {
-                                       matched_txids.insert(transaction.txid());
-                                       matched_index.push(index);
-                               }
-                       }
-               }
-               matched_index
-       }
-
-       fn reentered(&self) -> usize {
-               self.reentered.load(Ordering::Relaxed)
-       }
-}
-
-impl ChainWatchInterfaceUtil {
-       /// Creates a new ChainWatchInterfaceUtil for the given network
-       pub fn new(network: Network) -> ChainWatchInterfaceUtil {
-               ChainWatchInterfaceUtil {
-                       network,
-                       watched: Mutex::new(ChainWatchedUtil::new()),
-                       reentered: AtomicUsize::new(1),
-               }
-       }
-
-       /// Checks if a given transaction matches the current filter.
-       pub fn does_match_tx(&self, tx: &Transaction) -> bool {
-               let watched = self.watched.lock().unwrap();
-               self.does_match_tx_unguarded (tx, &watched)
-       }
-
-       fn does_match_tx_unguarded(&self, tx: &Transaction, watched: &MutexGuard<ChainWatchedUtil>) -> bool {
-               watched.does_match_tx(tx)
-       }
-}
-
-#[cfg(test)]
-mod tests {
-       use ln::functional_test_utils::{create_chanmon_cfgs, create_node_cfgs};
-       use super::{BlockNotifier, ChainListener};
-       use std::ptr;
-
-       #[test]
-       fn register_listener_test() {
-               let chanmon_cfgs = create_chanmon_cfgs(1);
-               let node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor);
-               assert_eq!(block_notifier.listeners.lock().unwrap().len(), 0);
-               let listener = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
-               block_notifier.register_listener(listener);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 1);
-               let item = vec.first().clone().unwrap();
-               assert!(ptr::eq(&(**item), &(*listener)));
-       }
-
-       #[test]
-       fn unregister_single_listener_test() {
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor);
-               let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
-               let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener;
-               block_notifier.register_listener(listener1);
-               block_notifier.register_listener(listener2);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 2);
-               drop(vec);
-               block_notifier.unregister_listener(listener1);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 1);
-               let item = vec.first().clone().unwrap();
-               assert!(ptr::eq(&(**item), &(*listener2)));
-       }
-
-       #[test]
-       fn unregister_single_listener_ref_test() {
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor);
-               block_notifier.register_listener(&node_cfgs[0].chan_monitor.simple_monitor as &ChainListener);
-               block_notifier.register_listener(&node_cfgs[1].chan_monitor.simple_monitor as &ChainListener);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 2);
-               drop(vec);
-               block_notifier.unregister_listener(&node_cfgs[0].chan_monitor.simple_monitor);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 1);
-               let item = vec.first().clone().unwrap();
-               assert!(ptr::eq(&(**item), &(*&node_cfgs[1].chan_monitor.simple_monitor)));
-       }
-
-       #[test]
-       fn unregister_multiple_of_the_same_listeners_test() {
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor);
-               let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
-               let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener;
-               block_notifier.register_listener(listener1);
-               block_notifier.register_listener(listener1);
-               block_notifier.register_listener(listener2);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 3);
-               drop(vec);
-               block_notifier.unregister_listener(listener1);
-               let vec = block_notifier.listeners.lock().unwrap();
-               assert_eq!(vec.len(), 1);
-               let item = vec.first().clone().unwrap();
-               assert!(ptr::eq(&(**item), &(*listener2)));
-       }
-}
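With `BlockNotifier`'s re-scan loop gone, that responsibility moves to whoever feeds blocks into `ChainMonitor`. A sketch of the contract described in the new `chainmonitor` docs below, assuming (per those docs) that `block_connected` signals newly registered watch data via its boolean return; `refilter` stands in for the light client's own matching logic and must yield at least the previous matches plus any new matches and their in-block descendants:

```rust
use std::sync::Arc;

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::chainmonitor::ChainMonitor;
use lightning::chain::keysinterface::InMemoryChannelKeys;
use lightning::util::logger::Logger;

type Monitor = ChainMonitor<InMemoryChannelKeys, Arc<dyn chain::Filter>,
    Arc<dyn BroadcasterInterface>, Arc<dyn FeeEstimator>, Arc<dyn Logger>>;

/// Connect one (pre-filtered) block, re-scanning the same block whenever the
/// monitors registered new outputs with the chain::Filter mid-call.
fn connect_filtered_block<'a>(
    chain_monitor: &Monitor, header: &BlockHeader, height: u32,
    mut txdata: Vec<(usize, &'a Transaction)>,
    refilter: impl Fn() -> Vec<(usize, &'a Transaction)>,
) {
    while chain_monitor.block_connected(header, &txdata, height) {
        // New watch data was registered: re-match against the same block (no
        // need to re-fetch it) and connect again with the same header/height.
        txdata = refilter();
    }
}
```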
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
new file mode 100644
index 0000000..d858c12
--- /dev/null
+++ b/lightning/src/chain/chainmonitor.rs
@@ -0,0 +1,227 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Logic to connect off-chain channel management with on-chain transaction monitoring.
+//!
+//! [`ChainMonitor`] is an implementation of [`chain::Watch`] used both to process blocks and to
+//! update [`ChannelMonitor`]s accordingly. If any on-chain events need further processing, it will
+//! make those available as [`MonitorEvent`]s to be consumed.
+//!
+//! `ChainMonitor` is parameterized by an optional chain source, which must implement the
+//! [`chain::Filter`] trait. This provides a mechanism to signal new relevant outputs back to light
+//! clients, such that transactions spending those outputs are included in block data.
+//!
+//! `ChainMonitor` may be used directly to monitor channels locally or as a part of a distributed
+//! setup to monitor channels remotely. In the latter case, a custom `chain::Watch` implementation
+//! would be responsible for routing each update to a remote server and for retrieving monitor
+//! events. The remote server would make use of `ChainMonitor` for block processing and for
+//! servicing `ChannelMonitor` updates from the client.
+//!
+//! [`ChainMonitor`]: struct.ChainMonitor.html
+//! [`chain::Filter`]: ../trait.Filter.html
+//! [`chain::Watch`]: ../trait.Watch.html
+//! [`ChannelMonitor`]: ../channelmonitor/struct.ChannelMonitor.html
+//! [`MonitorEvent`]: ../channelmonitor/enum.MonitorEvent.html
+
+use bitcoin::blockdata::block::BlockHeader;
+
+use chain;
+use chain::Filter;
+use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent, MonitorUpdateError};
+use chain::transaction::{OutPoint, TransactionData};
+use chain::keysinterface::ChannelKeys;
+use util::logger::Logger;
+use util::events;
+use util::events::Event;
+
+use std::collections::{HashMap, hash_map};
+use std::sync::Mutex;
+use std::ops::Deref;
+
+/// An implementation of [`chain::Watch`] for monitoring channels.
+///
+/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
+/// [`chain::Watch`]. May be used in conjunction with [`ChannelManager`] to monitor channels locally
+/// or used independently to monitor channels remotely. See the [module-level documentation] for
+/// details.
+///
+/// [`chain::Watch`]: ../trait.Watch.html
+/// [`ChannelManager`]: ../../ln/channelmanager/struct.ChannelManager.html
+/// [module-level documentation]: index.html
+pub struct ChainMonitor<ChanSigner: ChannelKeys, C: Deref, T: Deref, F: Deref, L: Deref>
+       where C::Target: chain::Filter,
+             T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator,
+             L::Target: Logger,
+{
+       /// The monitors, keyed by the funding outpoint of each monitored channel
+       pub monitors: Mutex<HashMap<OutPoint, ChannelMonitor<ChanSigner>>>,
+       chain_source: Option<C>,
+       broadcaster: T,
+       logger: L,
+       fee_estimator: F
+}
+
+impl<ChanSigner: ChannelKeys, C: Deref, T: Deref, F: Deref, L: Deref> ChainMonitor<ChanSigner, C, T, F, L>
+       where C::Target: chain::Filter,
+             T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator,
+             L::Target: Logger,
+{
+       /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
+       /// of a channel and reacting accordingly based on transactions in the connected block. See
+       /// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
+       /// be returned by [`chain::Watch::release_pending_monitor_events`].
+       ///
+       /// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch, returning
+       /// `true` if so. Subsequent calls must not exclude any transactions matching the new outputs
+       /// nor any in-block descendants of such transactions. It is not necessary to re-fetch the block
+       /// to obtain updated `txdata`.
+       ///
+       /// [`ChannelMonitor::block_connected`]: ../channelmonitor/struct.ChannelMonitor.html#method.block_connected
+       /// [`chain::Watch::release_pending_monitor_events`]: ../trait.Watch.html#tymethod.release_pending_monitor_events
+       /// [`chain::Filter`]: ../trait.Filter.html
+       pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) -> bool {
+               let mut has_new_outputs_to_watch = false;
+               {
+                       let mut monitors = self.monitors.lock().unwrap();
+                       for monitor in monitors.values_mut() {
+                               let mut txn_outputs = monitor.block_connected(header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+                               has_new_outputs_to_watch |= !txn_outputs.is_empty();
+
+                               if let Some(ref chain_source) = self.chain_source {
+                                       for (txid, outputs) in txn_outputs.drain(..) {
+                                               for (idx, output) in outputs.iter().enumerate() {
+                                                       chain_source.register_output(&OutPoint { txid, index: idx as u16 }, &output.script_pubkey);
+                                               }
+                                       }
+                               }
+                       }
+               }
+               has_new_outputs_to_watch
+       }
+
+       /// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
+       /// of a channel based on the disconnected block. See [`ChannelMonitor::block_disconnected`] for
+       /// details.
+       ///
+       /// [`ChannelMonitor::block_disconnected`]: ../channelmonitor/struct.ChannelMonitor.html#method.block_disconnected
+       pub fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
+               let mut monitors = self.monitors.lock().unwrap();
+               for monitor in monitors.values_mut() {
+                       monitor.block_disconnected(header, disconnected_height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
+               }
+       }
+
+       /// Creates a new `ChainMonitor` used to watch on-chain activity pertaining to channels.
+       ///
+       /// When an optional chain source implementing [`chain::Filter`] is provided, the chain monitor
+       /// will call back to it indicating transactions and outputs of interest. This allows clients to
+       /// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
+       /// always need to fetch full blocks absent another means for determining which blocks contain
+       /// transactions relevant to the watched channels.
+       ///
+       /// [`chain::Filter`]: ../trait.Filter.html
+       pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F) -> Self {
+               Self {
+                       monitors: Mutex::new(HashMap::new()),
+                       chain_source,
+                       broadcaster,
+                       logger,
+                       fee_estimator: feeest,
+               }
+       }
+
+       /// Adds the monitor that watches the channel referred to by the given outpoint.
+       ///
+       /// Calls back to [`chain::Filter`] with the funding transaction and outputs to watch.
+       ///
+       /// [`chain::Filter`]: ../trait.Filter.html
+       fn add_monitor(&self, outpoint: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
+               let mut monitors = self.monitors.lock().unwrap();
+               let entry = match monitors.entry(outpoint) {
+                       hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given outpoint is already present")),
+                       hash_map::Entry::Vacant(e) => e,
+               };
+               {
+                       let funding_txo = monitor.get_funding_txo();
+                       log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..]));
+
+                       if let Some(ref chain_source) = self.chain_source {
+                               chain_source.register_tx(&funding_txo.0.txid, &funding_txo.1);
+                               for (txid, outputs) in monitor.get_outputs_to_watch().iter() {
+                                       for (idx, script_pubkey) in outputs.iter().enumerate() {
+                                               chain_source.register_output(&OutPoint { txid: *txid, index: idx as u16 }, &script_pubkey);
+                                       }
+                               }
+                       }
+               }
+               entry.insert(monitor);
+               Ok(())
+       }
+
+       /// Updates the monitor that watches the channel referred to by the given outpoint.
+       fn update_monitor(&self, outpoint: OutPoint, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+               let mut monitors = self.monitors.lock().unwrap();
+               match monitors.get_mut(&outpoint) {
+                       Some(orig_monitor) => {
+                               log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor));
+                               orig_monitor.update_monitor(update, &self.broadcaster, &self.logger)
+                       },
+                       None => Err(MonitorUpdateError("No such monitor registered"))
+               }
+       }
+}
+
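The impl above fans each connected block out to every registered monitor under a single Mutex-guarded map and reports any newly watched outputs back through the optional chain::Filter. A minimal, self-contained sketch of that fan-out pattern, using illustrative stand-in types (Monitor, Watcher) rather than the real rust-lightning ones:

use std::collections::HashMap;
use std::sync::Mutex;

// Illustrative stand-ins; these are not the rust-lightning types.
struct Monitor;

impl Monitor {
    // Returns scripts that newly need watching after processing this block.
    fn block_connected(&mut self, _height: u32) -> Vec<String> {
        Vec::new()
    }
}

struct Watcher {
    monitors: Mutex<HashMap<u64, Monitor>>,
    // Stand-in for the optional chain::Filter source.
    filter: Option<Box<dyn Fn(&str)>>,
}

impl Watcher {
    // Fan the block out to every monitor; report any new outputs to the filter.
    fn block_connected(&self, height: u32) -> bool {
        let mut has_new = false;
        let mut monitors = self.monitors.lock().unwrap();
        for monitor in monitors.values_mut() {
            let new_scripts = monitor.block_connected(height);
            has_new |= !new_scripts.is_empty();
            if let Some(ref filter) = self.filter {
                for script in new_scripts.iter() {
                    filter(script.as_str());
                }
            }
        }
        has_new
    }
}

fn main() {
    let watcher = Watcher { monitors: Mutex::new(HashMap::new()), filter: None };
    assert!(!watcher.block_connected(100));
}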
+impl<ChanSigner: ChannelKeys, C: Deref + Sync + Send, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send> chain::Watch for ChainMonitor<ChanSigner, C, T, F, L>
+       where C::Target: chain::Filter,
+             T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator,
+             L::Target: Logger,
+{
+       type Keys = ChanSigner;
+
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+               match self.add_monitor(funding_txo, monitor) {
+                       Ok(_) => Ok(()),
+                       Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
+               }
+       }
+
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+               match self.update_monitor(funding_txo, update) {
+                       Ok(_) => Ok(()),
+                       Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
+               }
+       }
+
+       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+               let mut pending_monitor_events = Vec::new();
+               for chan in self.monitors.lock().unwrap().values_mut() {
+                       pending_monitor_events.append(&mut chan.get_and_clear_pending_monitor_events());
+               }
+               pending_monitor_events
+       }
+}
+
+impl<ChanSigner: ChannelKeys, C: Deref, T: Deref, F: Deref, L: Deref> events::EventsProvider for ChainMonitor<ChanSigner, C, T, F, L>
+       where C::Target: chain::Filter,
+             T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator,
+             L::Target: Logger,
+{
+       fn get_and_clear_pending_events(&self) -> Vec<Event> {
+               let mut pending_events = Vec::new();
+               for chan in self.monitors.lock().unwrap().values_mut() {
+                       pending_events.append(&mut chan.get_and_clear_pending_events());
+               }
+               pending_events
+       }
+}
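Both release_pending_monitor_events and get_and_clear_pending_events above follow the same drain-and-aggregate pattern: each monitor buffers its events and the ChainMonitor empties every buffer on request. A small sketch of that pattern with hypothetical Source/Event types:

struct Event(u32);

struct Source {
    pending: Vec<Event>,
}

impl Source {
    // Take the buffered events, leaving the buffer empty.
    fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
        std::mem::take(&mut self.pending)
    }
}

// Aggregate events across all sources, mirroring the drain loops above.
fn drain_all(sources: &mut [Source]) -> Vec<Event> {
    let mut all = Vec::new();
    for source in sources.iter_mut() {
        all.append(&mut source.get_and_clear_pending_events());
    }
    all
}

fn main() {
    let mut sources = vec![Source { pending: vec![Event(1)] }, Source { pending: vec![Event(2)] }];
    assert_eq!(drain_all(&mut sources).len(), 2);
    assert!(drain_all(&mut sources).is_empty());
}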
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
new file mode 100644 (file)
index 0000000..feff397
--- /dev/null
@@ -0,0 +1,2661 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
+//! here.
+//!
+//! ChannelMonitor objects are generated by ChannelManager in response to relevant
+//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
+//! be made in responding to certain messages, see [`chain::Watch`] for more.
+//!
+//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
+//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
+//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
+//! security-domain-separated system design, you should consider having multiple paths for
+//! ChannelMonitors to get out of the HSM and onto monitoring devices.
+//!
+//! [`chain::Watch`]: ../trait.Watch.html
+
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{TxOut,Transaction};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::{Script, Builder};
+use bitcoin::blockdata::opcodes;
+use bitcoin::consensus::encode;
+
+use bitcoin::hashes::Hash;
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
+
+use bitcoin::secp256k1::{Secp256k1,Signature};
+use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1;
+
+use ln::msgs::DecodeError;
+use ln::chan_utils;
+use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, HTLCType};
+use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
+use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
+use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::transaction::{OutPoint, TransactionData};
+use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
+use util::logger::Logger;
+use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
+use util::byte_utils;
+use util::events::Event;
+
+use std::collections::{HashMap, HashSet, hash_map};
+use std::{cmp, mem};
+use std::ops::Deref;
+use std::io::Error;
+
+/// An update generated by the underlying Channel itself which contains some new information the
+/// ChannelMonitor should be made aware of.
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+#[must_use]
+pub struct ChannelMonitorUpdate {
+       pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
+       /// The sequence number of this update. Updates *must* be replayed in-order according to this
+       /// sequence number (and updates may panic if they are not). The update_id values are strictly
+       /// increasing and increase by one for each new update.
+       ///
+       /// This sequence number is also used to track up to which point updates which returned
+       /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
+       /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+       pub update_id: u64,
+}
+
+impl Writeable for ChannelMonitorUpdate {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+               self.update_id.write(w)?;
+               (self.updates.len() as u64).write(w)?;
+               for update_step in self.updates.iter() {
+                       update_step.write(w)?;
+               }
+               Ok(())
+       }
+}
+impl Readable for ChannelMonitorUpdate {
+       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let update_id: u64 = Readable::read(r)?;
+               let len: u64 = Readable::read(r)?;
+               let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<ChannelMonitorUpdateStep>()));
+               for _ in 0..len {
+                       updates.push(Readable::read(r)?);
+               }
+               Ok(Self { update_id, updates })
+       }
+}
+
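The reader above caps its initial allocation using MAX_ALLOC_SIZE (defined elsewhere in this file) so that a corrupt length prefix cannot force a huge up-front reservation. A self-contained sketch of that length-prefixed read, assuming a simple big-endian u64 count followed by u32 entries and an illustrative cap value:

use std::io::{Cursor, Read};

// Illustrative cap; the real channelmonitor.rs defines its own MAX_ALLOC_SIZE elsewhere.
const MAX_ALLOC_SIZE: usize = 64 * 1024;

// Read a u64 length prefix followed by that many u32 entries, capping the initial
// allocation so a corrupt length prefix cannot trigger a huge reservation.
fn read_len_prefixed<R: Read>(r: &mut R) -> std::io::Result<Vec<u32>> {
    let mut len_bytes = [0u8; 8];
    r.read_exact(&mut len_bytes)?;
    let len = u64::from_be_bytes(len_bytes);
    let mut out = Vec::with_capacity(std::cmp::min(len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<u32>()));
    for _ in 0..len {
        let mut item = [0u8; 4];
        r.read_exact(&mut item)?;
        out.push(u32::from_be_bytes(item));
    }
    Ok(out)
}

fn main() -> std::io::Result<()> {
    let mut bytes = 2u64.to_be_bytes().to_vec();
    bytes.extend_from_slice(&7u32.to_be_bytes());
    bytes.extend_from_slice(&9u32.to_be_bytes());
    assert_eq!(read_len_prefixed(&mut Cursor::new(bytes))?, vec![7, 9]);
    Ok(())
}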
+/// An error enum representing a failure to persist a channel monitor update.
+#[derive(Clone)]
+pub enum ChannelMonitorUpdateErr {
+       /// Used to indicate a temporary failure (e.g. the connection to a watchtower or remote backup of
+       /// our state failed, but is expected to succeed at some point in the future).
+       ///
+       /// Such a failure will "freeze" a channel, preventing us from revoking old states or
+       /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
+       /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
+       /// restore the channel to an operational state.
+       ///
+       /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
+       /// you return a TemporaryFailure you must ensure that it is written to disk safely before
+       /// writing out the latest ChannelManager state.
+       ///
+       /// Even when a channel has been "frozen", updates to the ChannelMonitor can continue to occur
+       /// (e.g. if an inbound HTLC which we forwarded was claimed upstream, resulting in us attempting
+       /// to claim it on this channel) and those updates must be applied wherever they can be. At
+       /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
+       /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
+       /// the channel which would invalidate previous ChannelMonitors are not made when a channel has
+       /// been "frozen".
+       ///
+       /// Note that even if updates made after TemporaryFailure succeed you must still call
+       /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
+       /// operation.
+       ///
+       /// Note that the update being processed here will not be replayed for you when you call
+       /// ChannelManager::channel_monitor_updated, so you must store the update itself along
+       /// with the persisted ChannelMonitor on your own local disk prior to returning a
+       /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
+       /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
+       /// reload-time.
+       ///
+       /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
+       /// remote location (with local copies persisted immediately), it is anticipated that all
+       /// updates will return TemporaryFailure until the remote copies could be updated.
+       TemporaryFailure,
+       /// Used to indicate no further channel monitor updates will be allowed (e.g. we've moved on to a
+       /// different watchtower and cannot update with all watchtowers that were previously informed
+       /// of this channel).
+       ///
+       /// At reception of this error, ChannelManager will force-close the channel and return at
+       /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
+       /// least one ChannelMonitor copy. Revocation secret MUST NOT be released and offchain channel
+       /// least one ChannelMonitor copy. The revocation secret MUST NOT be released and the
+       /// off-chain channel update must be rejected.
+       /// This failure may also signal a failure to update the local persisted copy of one of
+       /// the channel monitor instances.
+       ///
+       /// Note that even when you fail a holder commitment transaction update, you must store the
+       /// update to ensure you can claim from it in case a duplicate copy of this ChannelMonitor
+       /// broadcasts it (e.g. in a distributed channel-monitor deployment).
+       ///
+       /// In the case of a distributed watchtower deployment, the new version must be written to disk,
+       /// as state may have been stored but rejected due to a block forcing a commitment broadcast. This
+       /// storage is used to claim outputs of a rejected state confirmed onchain by another watchtower
+       /// which is lagging behind on block processing.
+       PermanentFailure,
+}
+
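To illustrate how a persister might choose between the two variants above, here is a hedged, self-contained sketch using local stand-in enums (UpdateErr, PersistOutcome); it mirrors the documented semantics rather than any real rust-lightning API:

// Stand-in mirrors, kept local so the sketch is self-contained; not the real types.
enum UpdateErr {
    TemporaryFailure,
    PermanentFailure,
}

enum PersistOutcome {
    Persisted,
    RemoteBackupUnreachable, // expected to succeed at some point in the future
    LocalStateUnrecoverable, // no further updates can be accepted
}

// Hypothetical mapping a persister might apply when asked to store an update.
fn map_persist_outcome(outcome: PersistOutcome) -> Result<(), UpdateErr> {
    match outcome {
        PersistOutcome::Persisted => Ok(()),
        // Freezes the channel until the failed update is eventually persisted.
        PersistOutcome::RemoteBackupUnreachable => Err(UpdateErr::TemporaryFailure),
        // Leads to a force-close; the latest holder commitment should be broadcast.
        PersistOutcome::LocalStateUnrecoverable => Err(UpdateErr::PermanentFailure),
    }
}

fn main() {
    assert!(map_persist_outcome(PersistOutcome::Persisted).is_ok());
    assert!(matches!(map_persist_outcome(PersistOutcome::RemoteBackupUnreachable), Err(UpdateErr::TemporaryFailure)));
}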
+/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
+       /// inconsistent with the ChannelMonitor being called, e.g. for ChannelMonitor::update_monitor this
+/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
+/// corrupted.
+/// Contains a human-readable error message.
+#[derive(Debug)]
+pub struct MonitorUpdateError(pub &'static str);
+
+/// An event to be processed by the ChannelManager.
+#[derive(PartialEq)]
+pub enum MonitorEvent {
+       /// A monitor event containing an HTLCUpdate.
+       HTLCEvent(HTLCUpdate),
+
+       /// A monitor event indicating that the Channel's commitment transaction was broadcast.
+       CommitmentTxBroadcasted(OutPoint),
+}
+
+/// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
+/// chain. Used to update the corresponding HTLC in the backward channel. Failing to pass the
+/// preimage claim backward will lead to loss of funds.
+///
+/// [`chain::Watch`]: ../trait.Watch.html
+#[derive(Clone, PartialEq)]
+pub struct HTLCUpdate {
+       pub(crate) payment_hash: PaymentHash,
+       pub(crate) payment_preimage: Option<PaymentPreimage>,
+       pub(crate) source: HTLCSource
+}
+impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
+
+/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
+/// but instead claim it in its own individual transaction.
+pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
+/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
+/// HTLC-Success transaction.
+/// In other words, this is an upper bound on how many blocks we think it can take us to get a
+/// transaction confirmed (and we use it in a few more, equivalent, places).
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
+/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
+/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
+/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
+/// at CLTV expiration height, but giving a grace period to our peer may be profitable for us if they
+/// can provide a late preimage. Nevertheless, the grace period has to be accounted for in our
+/// CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel failures
+/// due to expiration but increase the cost of funds being locked up longer in case of failure.
+/// This delay also covers a low-power peer being slow to process blocks and thus being behind us on
+/// the current block height.
+/// If an on-chain failure needs to be passed backward, we may see the last block of ANTI_REORG_DELAY
+/// at worst this many blocks late, so this value is not only a mercy for our peer but also a
+/// safeguard giving ourselves enough time to act.
+pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
+/// Number of blocks we wait after seeing an HTLC output being solved before we fail the corresponding
+/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us losing money.
+/// We also use this delay to be sure we can remove our in-flight claim txn from the bump candidates buffer.
+/// It may cause spurious generation of bumped claim txn, but that's all right given the outpoint is already
+/// solved by a previous claim tx. What we want to avoid is a reorg evicting our claim tx and us failing to
+/// keep bumping another claim tx to solve the outpoint.
+pub(crate) const ANTI_REORG_DELAY: u32 = 6;
+/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
+/// refuse to accept a new HTLC.
+///
+/// This is used for a few separate purposes:
+/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are
+///    waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will
+///    fail this HTLC,
+/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
+///    condition with the above), we will fail this HTLC without telling the user we received it,
+/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
+///    that HTLC expires within this many blocks, we will simply fail the HTLC instead.
+///
+/// (1) is all about protecting us - we need enough time to update the channel state before we hit
+/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
+///
+/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
+/// in a race condition between the user connecting a block (which would fail it) and the user
+/// providing us the preimage (which would claim it).
+///
+/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
+/// end up force-closing the channel on us to claim it.
+pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
+
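With the constants defined above, the fail-back buffer works out to 6 + 3 = 9 blocks; a trivial check of that arithmetic, with the values copied from this file:

const CLTV_CLAIM_BUFFER: u32 = 6;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;

fn main() {
    // 6 + 3 = 9 blocks of headroom before an un-relayed HTLC is failed back.
    assert_eq!(HTLC_FAIL_BACK_BUFFER, 9);
}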
+#[derive(Clone, PartialEq)]
+struct HolderSignedTx {
+       /// txid of the holder commitment transaction, just used to make comparisons faster
+       txid: Txid,
+       revocation_key: PublicKey,
+       a_htlc_key: PublicKey,
+       b_htlc_key: PublicKey,
+       delayed_payment_key: PublicKey,
+       per_commitment_point: PublicKey,
+       feerate_per_kw: u32,
+       htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+}
+
+/// We use this to track counterparty commitment transactions and HTLC outputs and
+/// to generate any justice or 2nd-stage preimage/timeout transactions.
+#[derive(PartialEq)]
+struct CounterpartyCommitmentTransaction {
+       counterparty_delayed_payment_base_key: PublicKey,
+       counterparty_htlc_base_key: PublicKey,
+       on_counterparty_tx_csv: u16,
+       per_htlc: HashMap<Txid, Vec<HTLCOutputInCommitment>>
+}
+
+impl Writeable for CounterpartyCommitmentTransaction {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+               self.counterparty_delayed_payment_base_key.write(w)?;
+               self.counterparty_htlc_base_key.write(w)?;
+               w.write_all(&byte_utils::be16_to_array(self.on_counterparty_tx_csv))?;
+               w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?;
+               for (ref txid, ref htlcs) in self.per_htlc.iter() {
+                       w.write_all(&txid[..])?;
+                       w.write_all(&byte_utils::be64_to_array(htlcs.len() as u64))?;
+                       for &ref htlc in htlcs.iter() {
+                               htlc.write(w)?;
+                       }
+               }
+               Ok(())
+       }
+}
+impl Readable for CounterpartyCommitmentTransaction {
+       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               let counterparty_commitment_transaction = {
+                       let counterparty_delayed_payment_base_key = Readable::read(r)?;
+                       let counterparty_htlc_base_key = Readable::read(r)?;
+                       let on_counterparty_tx_csv: u16 = Readable::read(r)?;
+                       let per_htlc_len: u64 = Readable::read(r)?;
+                       let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64));
+                       for _  in 0..per_htlc_len {
+                               let txid: Txid = Readable::read(r)?;
+                               let htlcs_count: u64 = Readable::read(r)?;
+                               let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
+                               for _ in 0..htlcs_count {
+                                       let htlc = Readable::read(r)?;
+                                       htlcs.push(htlc);
+                               }
+                               if let Some(_) = per_htlc.insert(txid, htlcs) {
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                       }
+                       CounterpartyCommitmentTransaction {
+                               counterparty_delayed_payment_base_key,
+                               counterparty_htlc_base_key,
+                               on_counterparty_tx_csv,
+                               per_htlc,
+                       }
+               };
+               Ok(counterparty_commitment_transaction)
+       }
+}
+
+/// When ChannelMonitor discovers an on-chain outpoint that is part of a channel and for which it needs
+/// to generate a tx to push the channel state forward, we cache the outpoint-solving tx material to build
+/// a new bumped one in case of a lengthy confirmation delay.
+#[derive(Clone, PartialEq)]
+pub(crate) enum InputMaterial {
+       Revoked {
+               per_commitment_point: PublicKey,
+               counterparty_delayed_payment_base_key: PublicKey,
+               counterparty_htlc_base_key: PublicKey,
+               per_commitment_key: SecretKey,
+               input_descriptor: InputDescriptors,
+               amount: u64,
+               htlc: Option<HTLCOutputInCommitment>,
+               on_counterparty_tx_csv: u16,
+       },
+       CounterpartyHTLC {
+               per_commitment_point: PublicKey,
+               counterparty_delayed_payment_base_key: PublicKey,
+               counterparty_htlc_base_key: PublicKey,
+               preimage: Option<PaymentPreimage>,
+               htlc: HTLCOutputInCommitment
+       },
+       HolderHTLC {
+               preimage: Option<PaymentPreimage>,
+               amount: u64,
+       },
+       Funding {
+               funding_redeemscript: Script,
+       }
+}
+
+impl Writeable for InputMaterial  {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               match self {
+                       &InputMaterial::Revoked { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_counterparty_tx_csv} => {
+                               writer.write_all(&[0; 1])?;
+                               per_commitment_point.write(writer)?;
+                               counterparty_delayed_payment_base_key.write(writer)?;
+                               counterparty_htlc_base_key.write(writer)?;
+                               writer.write_all(&per_commitment_key[..])?;
+                               input_descriptor.write(writer)?;
+                               writer.write_all(&byte_utils::be64_to_array(*amount))?;
+                               htlc.write(writer)?;
+                               on_counterparty_tx_csv.write(writer)?;
+                       },
+                       &InputMaterial::CounterpartyHTLC { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref preimage, ref htlc} => {
+                               writer.write_all(&[1; 1])?;
+                               per_commitment_point.write(writer)?;
+                               counterparty_delayed_payment_base_key.write(writer)?;
+                               counterparty_htlc_base_key.write(writer)?;
+                               preimage.write(writer)?;
+                               htlc.write(writer)?;
+                       },
+                       &InputMaterial::HolderHTLC { ref preimage, ref amount } => {
+                               writer.write_all(&[2; 1])?;
+                               preimage.write(writer)?;
+                               writer.write_all(&byte_utils::be64_to_array(*amount))?;
+                       },
+                       &InputMaterial::Funding { ref funding_redeemscript } => {
+                               writer.write_all(&[3; 1])?;
+                               funding_redeemscript.write(writer)?;
+                       }
+               }
+               Ok(())
+       }
+}
+
+impl Readable for InputMaterial {
+       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               let input_material = match <u8 as Readable>::read(reader)? {
+                       0 => {
+                               let per_commitment_point = Readable::read(reader)?;
+                               let counterparty_delayed_payment_base_key = Readable::read(reader)?;
+                               let counterparty_htlc_base_key = Readable::read(reader)?;
+                               let per_commitment_key = Readable::read(reader)?;
+                               let input_descriptor = Readable::read(reader)?;
+                               let amount = Readable::read(reader)?;
+                               let htlc = Readable::read(reader)?;
+                               let on_counterparty_tx_csv = Readable::read(reader)?;
+                               InputMaterial::Revoked {
+                                       per_commitment_point,
+                                       counterparty_delayed_payment_base_key,
+                                       counterparty_htlc_base_key,
+                                       per_commitment_key,
+                                       input_descriptor,
+                                       amount,
+                                       htlc,
+                                       on_counterparty_tx_csv
+                               }
+                       },
+                       1 => {
+                               let per_commitment_point = Readable::read(reader)?;
+                               let counterparty_delayed_payment_base_key = Readable::read(reader)?;
+                               let counterparty_htlc_base_key = Readable::read(reader)?;
+                               let preimage = Readable::read(reader)?;
+                               let htlc = Readable::read(reader)?;
+                               InputMaterial::CounterpartyHTLC {
+                                       per_commitment_point,
+                                       counterparty_delayed_payment_base_key,
+                                       counterparty_htlc_base_key,
+                                       preimage,
+                                       htlc
+                               }
+                       },
+                       2 => {
+                               let preimage = Readable::read(reader)?;
+                               let amount = Readable::read(reader)?;
+                               InputMaterial::HolderHTLC {
+                                       preimage,
+                                       amount,
+                               }
+                       },
+                       3 => {
+                               InputMaterial::Funding {
+                                       funding_redeemscript: Readable::read(reader)?,
+                               }
+                       }
+                       _ => return Err(DecodeError::InvalidValue),
+               };
+               Ok(input_material)
+       }
+}
+
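The Writeable/Readable pair above uses a one-byte tag per variant and rejects unknown tags on read. A compact, self-contained sketch of that tagged-enum pattern with an illustrative two-variant Material enum (not the real InputMaterial):

use std::io::{self, Read, Write};

// Illustrative two-variant enum; not the real InputMaterial.
enum Material {
    Preimage([u8; 32]),
    Amount(u64),
}

impl Material {
    // Each variant is prefixed with a one-byte tag, mirroring the pattern above.
    fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        match self {
            Material::Preimage(p) => {
                w.write_all(&[0u8])?;
                w.write_all(p)
            }
            Material::Amount(a) => {
                w.write_all(&[1u8])?;
                w.write_all(&a.to_be_bytes())
            }
        }
    }

    // Unknown tags are rejected rather than silently skipped.
    fn read<R: Read>(r: &mut R) -> io::Result<Self> {
        let mut tag = [0u8; 1];
        r.read_exact(&mut tag)?;
        match tag[0] {
            0 => {
                let mut p = [0u8; 32];
                r.read_exact(&mut p)?;
                Ok(Material::Preimage(p))
            }
            1 => {
                let mut a = [0u8; 8];
                r.read_exact(&mut a)?;
                Ok(Material::Amount(u64::from_be_bytes(a)))
            }
            _ => Err(io::Error::new(io::ErrorKind::InvalidData, "unknown tag")),
        }
    }
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    Material::Amount(42).write(&mut buf)?;
    match Material::read(&mut &buf[..])? {
        Material::Amount(a) => assert_eq!(a, 42),
        _ => panic!("wrong variant"),
    }
    Ok(())
}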
+/// ClaimRequest is a descriptor structure used to communicate between the detection
+/// and reaction modules. ClaimRequests are generated by ChannelMonitor while parsing
+/// onchain txn leaked from a channel and handed over to OnchainTxHandler, which
+/// is responsible for opportunistic aggregation, selecting and enforcing
+/// bumping logic, and building and signing transactions.
+pub(crate) struct ClaimRequest {
+       // Block height before which claiming is exclusive to one party;
+       // after it is reached, claiming may be contentious.
+       pub(crate) absolute_timelock: u32,
+       // A timeout tx must have nLocktime set, which means aggregating multiple
+       // ones must take the highest nLocktime among them to satisfy all of them.
+       // Sadly this has a few pitfalls: a) it takes longer to get funds back, b) the CLTV_DELTA
+       // of a sooner HTLC could be swallowed by the highest nLocktime of the HTLC set.
+       // To simplify, we mark them as non-aggregable.
+       pub(crate) aggregable: bool,
+       // Basic bitcoin outpoint (txid, vout)
+       pub(crate) outpoint: BitcoinOutPoint,
+       // Following outpoint type, set of data needed to generate transaction digest
+       // and satisfy witness program.
+       pub(crate) witness_data: InputMaterial
+}
+
+/// Upon ChannelMonitor discovering certain classes of onchain tx, we may have to take action on them
+/// once they mature to enough confirmations (ANTI_REORG_DELAY).
+#[derive(Clone, PartialEq)]
+enum OnchainEvent {
+       /// HTLC output getting solved by a timeout; at maturation we pass the upstream payment source information to
+       /// resolve the inbound HTLC in the backward channel. Note that in the case of a preimage we pass the info
+       /// upstream without delay, as we can only win from it, so it's never an OnchainEvent.
+       HTLCUpdate {
+               htlc_update: (HTLCSource, PaymentHash),
+       },
+       MaturingOutput {
+               descriptor: SpendableOutputDescriptor,
+       },
+}
+
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+pub(crate) enum ChannelMonitorUpdateStep {
+       LatestHolderCommitmentTXInfo {
+               commitment_tx: HolderCommitmentTransaction,
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+       },
+       LatestCounterpartyCommitmentTXInfo {
+               unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+               commitment_number: u64,
+               their_revocation_point: PublicKey,
+       },
+       PaymentPreimage {
+               payment_preimage: PaymentPreimage,
+       },
+       CommitmentSecret {
+               idx: u64,
+               secret: [u8; 32],
+       },
+       /// Used to indicate that no future updates will occur, and likely that the latest holder
+       /// commitment transaction(s) should be broadcast, as the channel has been force-closed.
+       ChannelForceClosed {
+               /// If set to false, we shouldn't broadcast the latest holder commitment transaction as we
+               /// think we've fallen behind!
+               should_broadcast: bool,
+       },
+}
+
+impl Writeable for ChannelMonitorUpdateStep {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+               match self {
+                       &ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { ref commitment_tx, ref htlc_outputs } => {
+                               0u8.write(w)?;
+                               commitment_tx.write(w)?;
+                               (htlc_outputs.len() as u64).write(w)?;
+                               for &(ref output, ref signature, ref source) in htlc_outputs.iter() {
+                                       output.write(w)?;
+                                       signature.write(w)?;
+                                       source.write(w)?;
+                               }
+                       }
+                       &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => {
+                               1u8.write(w)?;
+                               unsigned_commitment_tx.write(w)?;
+                               commitment_number.write(w)?;
+                               their_revocation_point.write(w)?;
+                               (htlc_outputs.len() as u64).write(w)?;
+                               for &(ref output, ref source) in htlc_outputs.iter() {
+                                       output.write(w)?;
+                                       source.as_ref().map(|b| b.as_ref()).write(w)?;
+                               }
+                       },
+                       &ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => {
+                               2u8.write(w)?;
+                               payment_preimage.write(w)?;
+                       },
+                       &ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => {
+                               3u8.write(w)?;
+                               idx.write(w)?;
+                               secret.write(w)?;
+                       },
+                       &ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => {
+                               4u8.write(w)?;
+                               should_broadcast.write(w)?;
+                       },
+               }
+               Ok(())
+       }
+}
+impl Readable for ChannelMonitorUpdateStep {
+       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+               match Readable::read(r)? {
+                       0u8 => {
+                               Ok(ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+                                       commitment_tx: Readable::read(r)?,
+                                       htlc_outputs: {
+                                               let len: u64 = Readable::read(r)?;
+                                               let mut res = Vec::new();
+                                               for _ in 0..len {
+                                                       res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?));
+                                               }
+                                               res
+                                       },
+                               })
+                       },
+                       1u8 => {
+                               Ok(ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
+                                       unsigned_commitment_tx: Readable::read(r)?,
+                                       commitment_number: Readable::read(r)?,
+                                       their_revocation_point: Readable::read(r)?,
+                                       htlc_outputs: {
+                                               let len: u64 = Readable::read(r)?;
+                                               let mut res = Vec::new();
+                                               for _ in 0..len {
+                                                       res.push((Readable::read(r)?, <Option<HTLCSource> as Readable>::read(r)?.map(|o| Box::new(o))));
+                                               }
+                                               res
+                                       },
+                               })
+                       },
+                       2u8 => {
+                               Ok(ChannelMonitorUpdateStep::PaymentPreimage {
+                                       payment_preimage: Readable::read(r)?,
+                               })
+                       },
+                       3u8 => {
+                               Ok(ChannelMonitorUpdateStep::CommitmentSecret {
+                                       idx: Readable::read(r)?,
+                                       secret: Readable::read(r)?,
+                               })
+                       },
+                       4u8 => {
+                               Ok(ChannelMonitorUpdateStep::ChannelForceClosed {
+                                       should_broadcast: Readable::read(r)?
+                               })
+                       },
+                       _ => Err(DecodeError::InvalidValue),
+               }
+       }
+}
+
+/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
+/// on-chain transactions to ensure no loss of funds occurs.
+///
+/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
+/// information and are actively monitoring the chain.
+///
+/// Pending Events or updated HTLCs which have not yet been read out by
+/// get_and_clear_pending_monitor_events or get_and_clear_pending_events are serialized to disk and
+/// reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events
+/// retrieved are fully handled before re-serializing the new state.
+pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
+       latest_update_id: u64,
+       commitment_transaction_number_obscure_factor: u64,
+
+       destination_script: Script,
+       broadcasted_holder_revokable_script: Option<(Script, PublicKey, PublicKey)>,
+       counterparty_payment_script: Script,
+       shutdown_script: Script,
+
+       keys: ChanSigner,
+       funding_info: (OutPoint, Script),
+       current_counterparty_commitment_txid: Option<Txid>,
+       prev_counterparty_commitment_txid: Option<Txid>,
+
+       counterparty_tx_cache: CounterpartyCommitmentTransaction,
+       funding_redeemscript: Script,
+       channel_value_satoshis: u64,
+       // first is the idx of the first of the two revocation points
+       their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
+
+       on_holder_tx_csv: u16,
+
+       commitment_secrets: CounterpartyCommitmentSecrets,
+       counterparty_claimable_outpoints: HashMap<Txid, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
+       /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
+       /// Nor can we figure out their commitment numbers without the commitment transaction they are
+       /// spending. Thus, in order to claim them via revocation key, we track all the counterparty
+       /// commitment transactions which we find on-chain, mapping them to the commitment number which
+       /// can be used to derive the revocation key and claim the transactions.
+       counterparty_commitment_txn_on_chain: HashMap<Txid, (u64, Vec<Script>)>,
+       /// Cache used to make pruning of payment_preimages faster.
+       /// Maps payment_hash values to commitment numbers for non-revoked counterparty transactions
+       /// (i.e. should remain pretty small).
+       /// Serialized to disk but should generally not be sent to Watchtowers.
+       counterparty_hash_commitment_number: HashMap<PaymentHash, u64>,
+
+       // We store two holder commitment transactions to avoid any race conditions where we may update
+       // some monitors (potentially on watchtowers) but then fail to update others, resulting in the
+       // various monitors for one channel being out of sync, and us broadcasting a holder
+       // transaction for which we have deleted claim information on some watchtowers.
+       prev_holder_signed_commitment_tx: Option<HolderSignedTx>,
+       current_holder_commitment_tx: HolderSignedTx,
+
+       // Used just for ChannelManager to make sure it has the latest channel data during
+       // deserialization
+       current_counterparty_commitment_number: u64,
+       // Used just for ChannelManager to make sure it has the latest channel data during
+       // deserialization
+       current_holder_commitment_number: u64,
+
+       payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
+
+       pending_monitor_events: Vec<MonitorEvent>,
+       pending_events: Vec<Event>,
+
+       // Used to track onchain events, i.e. transactions that are part of channels confirmed on chain, on which
+       // we have to take action once they reach enough confirmations. The key is a block height timer, i.e. we
+       // enforce actions when we receive a block with the given height. Actions depend on the OnchainEvent type.
+       onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
+
+       // If we get serialized out and re-read, we need to make sure that the chain monitoring
+       // interface knows about the TXOs that we want to be notified of spends of. We could probably
+       // be smart and derive them from the above storage fields, but it's much simpler and more
+       // Obviously Correct (tm) if we just keep track of them explicitly.
+       outputs_to_watch: HashMap<Txid, Vec<Script>>,
+
+       #[cfg(test)]
+       pub onchain_tx_handler: OnchainTxHandler<ChanSigner>,
+       #[cfg(not(test))]
+       onchain_tx_handler: OnchainTxHandler<ChanSigner>,
+
+       // This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the
+       // channel has been force-closed. After this is set, no further holder commitment transaction
+       // updates may occur, and we panic!() if one is provided.
+       lockdown_from_offchain: bool,
+
+       // Set once we've signed a holder commitment transaction and handed it over to our
+       // OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
+       // may occur, and we fail any such monitor updates.
+       //
+       // If an update is rejected because we have already locally signed a commitment transaction, we
+       // nevertheless store the update content so we can track it in case of a concurrent broadcast by
+       // another remote monitor, out-of-order with regard to our block view.
+       holder_tx_signed: bool,
+
+       // We simply modify last_block_hash in Channel's block_connected so that serialization is
+       // consistent, but hopefully the users' copy handles block_connected in a consistent way.
+       // (We do *not*, however, update it in update_monitor, to ensure any local user copies keep
+       // their last_block_hash based on their own state and not on updated copies that didn't run
+       // through the full block_connected.)
+       last_block_hash: BlockHash,
+       secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
+}
+
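The struct documentation above requires that pending events be fully handled before the monitor is re-serialized. A minimal sketch of that ordering with entirely hypothetical names (MonitorState, persist), not the rust-lightning API:

// Hypothetical stand-ins illustrating the ordering only.
struct MonitorState {
    pending_events: Vec<String>,
    persisted_snapshots: u32,
}

impl MonitorState {
    fn get_and_clear_pending_events(&mut self) -> Vec<String> {
        std::mem::take(&mut self.pending_events)
    }

    fn persist(&mut self) {
        // Stand-in for serializing the monitor to disk.
        self.persisted_snapshots += 1;
    }
}

fn main() {
    let mut state = MonitorState { pending_events: vec!["payment_claimed".to_string()], persisted_snapshots: 0 };
    // Handle every outstanding event first...
    for event in state.get_and_clear_pending_events() {
        println!("handling {}", event);
    }
    // ...and only then write the new state back out.
    state.persist();
    assert_eq!(state.persisted_snapshots, 1);
}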
+#[cfg(any(test, feature = "fuzztarget"))]
+/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
+/// underlying object
+impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
+       fn eq(&self, other: &Self) -> bool {
+               if self.latest_update_id != other.latest_update_id ||
+                       self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
+                       self.destination_script != other.destination_script ||
+                       self.broadcasted_holder_revokable_script != other.broadcasted_holder_revokable_script ||
+                       self.counterparty_payment_script != other.counterparty_payment_script ||
+                       self.keys.pubkeys() != other.keys.pubkeys() ||
+                       self.funding_info != other.funding_info ||
+                       self.current_counterparty_commitment_txid != other.current_counterparty_commitment_txid ||
+                       self.prev_counterparty_commitment_txid != other.prev_counterparty_commitment_txid ||
+                       self.counterparty_tx_cache != other.counterparty_tx_cache ||
+                       self.funding_redeemscript != other.funding_redeemscript ||
+                       self.channel_value_satoshis != other.channel_value_satoshis ||
+                       self.their_cur_revocation_points != other.their_cur_revocation_points ||
+                       self.on_holder_tx_csv != other.on_holder_tx_csv ||
+                       self.commitment_secrets != other.commitment_secrets ||
+                       self.counterparty_claimable_outpoints != other.counterparty_claimable_outpoints ||
+                       self.counterparty_commitment_txn_on_chain != other.counterparty_commitment_txn_on_chain ||
+                       self.counterparty_hash_commitment_number != other.counterparty_hash_commitment_number ||
+                       self.prev_holder_signed_commitment_tx != other.prev_holder_signed_commitment_tx ||
+                       self.current_counterparty_commitment_number != other.current_counterparty_commitment_number ||
+                       self.current_holder_commitment_number != other.current_holder_commitment_number ||
+                       self.current_holder_commitment_tx != other.current_holder_commitment_tx ||
+                       self.payment_preimages != other.payment_preimages ||
+                       self.pending_monitor_events != other.pending_monitor_events ||
+                       self.pending_events.len() != other.pending_events.len() || // We trust events to round-trip properly
+                       self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf ||
+                       self.outputs_to_watch != other.outputs_to_watch ||
+                       self.lockdown_from_offchain != other.lockdown_from_offchain ||
+                       self.holder_tx_signed != other.holder_tx_signed
+               {
+                       false
+               } else {
+                       true
+               }
+       }
+}
+
+impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
+       /// Writes this monitor into the given writer, suitable for writing to disk.
+       ///
+       /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
+       /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
+       /// the "reorg path" (i.e. disconnecting blocks until you find a common ancestor from both the
+       /// returned block hash and the current chain, and then reconnecting blocks to get to the
+       /// best chain) upon deserializing the object!
+       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
+               //TODO: We still write out all the serialization here manually instead of using the fancy
+               //serialization framework we have, we should migrate things over to it.
+               writer.write_all(&[SERIALIZATION_VERSION; 1])?;
+               writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+
+               self.latest_update_id.write(writer)?;
+
+               // Set in initial Channel-object creation, so should always be set by now:
+               U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
+
+               self.destination_script.write(writer)?;
+               if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
+                       writer.write_all(&[0; 1])?;
+                       broadcasted_holder_revokable_script.0.write(writer)?;
+                       broadcasted_holder_revokable_script.1.write(writer)?;
+                       broadcasted_holder_revokable_script.2.write(writer)?;
+               } else {
+                       writer.write_all(&[1; 1])?;
+               }
+
+               self.counterparty_payment_script.write(writer)?;
+               self.shutdown_script.write(writer)?;
+
+               self.keys.write(writer)?;
+               writer.write_all(&self.funding_info.0.txid[..])?;
+               writer.write_all(&byte_utils::be16_to_array(self.funding_info.0.index))?;
+               self.funding_info.1.write(writer)?;
+               self.current_counterparty_commitment_txid.write(writer)?;
+               self.prev_counterparty_commitment_txid.write(writer)?;
+
+               self.counterparty_tx_cache.write(writer)?;
+               self.funding_redeemscript.write(writer)?;
+               self.channel_value_satoshis.write(writer)?;
+
+               match self.their_cur_revocation_points {
+                       Some((idx, pubkey, second_option)) => {
+                               writer.write_all(&byte_utils::be48_to_array(idx))?;
+                               writer.write_all(&pubkey.serialize())?;
+                               match second_option {
+                                       Some(second_pubkey) => {
+                                               writer.write_all(&second_pubkey.serialize())?;
+                                       },
+                                       None => {
+                                               writer.write_all(&[0; 33])?;
+                                       },
+                               }
+                       },
+                       None => {
+                               writer.write_all(&byte_utils::be48_to_array(0))?;
+                       },
+               }
+
+               writer.write_all(&byte_utils::be16_to_array(self.on_holder_tx_csv))?;
+
+               self.commitment_secrets.write(writer)?;
+
+               macro_rules! serialize_htlc_in_commitment {
+                       ($htlc_output: expr) => {
+                               writer.write_all(&[$htlc_output.offered as u8; 1])?;
+                               writer.write_all(&byte_utils::be64_to_array($htlc_output.amount_msat))?;
+                               writer.write_all(&byte_utils::be32_to_array($htlc_output.cltv_expiry))?;
+                               writer.write_all(&$htlc_output.payment_hash.0[..])?;
+                               $htlc_output.transaction_output_index.write(writer)?;
+                       }
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.counterparty_claimable_outpoints.len() as u64))?;
+               for (ref txid, ref htlc_infos) in self.counterparty_claimable_outpoints.iter() {
+                       writer.write_all(&txid[..])?;
+                       writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?;
+                       for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
+                               serialize_htlc_in_commitment!(htlc_output);
+                               htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?;
+                       }
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.counterparty_commitment_txn_on_chain.len() as u64))?;
+               for (ref txid, &(commitment_number, ref txouts)) in self.counterparty_commitment_txn_on_chain.iter() {
+                       writer.write_all(&txid[..])?;
+                       writer.write_all(&byte_utils::be48_to_array(commitment_number))?;
+                       (txouts.len() as u64).write(writer)?;
+                       for script in txouts.iter() {
+                               script.write(writer)?;
+                       }
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.counterparty_hash_commitment_number.len() as u64))?;
+               for (ref payment_hash, commitment_number) in self.counterparty_hash_commitment_number.iter() {
+                       writer.write_all(&payment_hash.0[..])?;
+                       writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
+               }
+
+               macro_rules! serialize_holder_tx {
+                       ($holder_tx: expr) => {
+                               $holder_tx.txid.write(writer)?;
+                               writer.write_all(&$holder_tx.revocation_key.serialize())?;
+                               writer.write_all(&$holder_tx.a_htlc_key.serialize())?;
+                               writer.write_all(&$holder_tx.b_htlc_key.serialize())?;
+                               writer.write_all(&$holder_tx.delayed_payment_key.serialize())?;
+                               writer.write_all(&$holder_tx.per_commitment_point.serialize())?;
+
+                               writer.write_all(&byte_utils::be32_to_array($holder_tx.feerate_per_kw))?;
+                               writer.write_all(&byte_utils::be64_to_array($holder_tx.htlc_outputs.len() as u64))?;
+                               for &(ref htlc_output, ref sig, ref htlc_source) in $holder_tx.htlc_outputs.iter() {
+                                       serialize_htlc_in_commitment!(htlc_output);
+                                       if let &Some(ref their_sig) = sig {
+                                               1u8.write(writer)?;
+                                               writer.write_all(&their_sig.serialize_compact())?;
+                                       } else {
+                                               0u8.write(writer)?;
+                                       }
+                                       htlc_source.write(writer)?;
+                               }
+                       }
+               }
+
+               if let Some(ref prev_holder_tx) = self.prev_holder_signed_commitment_tx {
+                       writer.write_all(&[1; 1])?;
+                       serialize_holder_tx!(prev_holder_tx);
+               } else {
+                       writer.write_all(&[0; 1])?;
+               }
+
+               serialize_holder_tx!(self.current_holder_commitment_tx);
+
+               writer.write_all(&byte_utils::be48_to_array(self.current_counterparty_commitment_number))?;
+               writer.write_all(&byte_utils::be48_to_array(self.current_holder_commitment_number))?;
+
+               writer.write_all(&byte_utils::be64_to_array(self.payment_preimages.len() as u64))?;
+               for payment_preimage in self.payment_preimages.values() {
+                       writer.write_all(&payment_preimage.0[..])?;
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.pending_monitor_events.len() as u64))?;
+               for event in self.pending_monitor_events.iter() {
+                       match event {
+                               MonitorEvent::HTLCEvent(upd) => {
+                                       0u8.write(writer)?;
+                                       upd.write(writer)?;
+                               },
+                               MonitorEvent::CommitmentTxBroadcasted(_) => 1u8.write(writer)?
+                       }
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.pending_events.len() as u64))?;
+               for event in self.pending_events.iter() {
+                       event.write(writer)?;
+               }
+
+               self.last_block_hash.write(writer)?;
+
+               writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
+               for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
+                       writer.write_all(&byte_utils::be32_to_array(**target))?;
+                       writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
+                       for ev in events.iter() {
+                               match *ev {
+                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                               0u8.write(writer)?;
+                                               htlc_update.0.write(writer)?;
+                                               htlc_update.1.write(writer)?;
+                                       },
+                                       OnchainEvent::MaturingOutput { ref descriptor } => {
+                                               1u8.write(writer)?;
+                                               descriptor.write(writer)?;
+                                       },
+                               }
+                       }
+               }
+
+               (self.outputs_to_watch.len() as u64).write(writer)?;
+               for (txid, output_scripts) in self.outputs_to_watch.iter() {
+                       txid.write(writer)?;
+                       (output_scripts.len() as u64).write(writer)?;
+                       for script in output_scripts.iter() {
+                               script.write(writer)?;
+                       }
+               }
+               self.onchain_tx_handler.write(writer)?;
+
+               self.lockdown_from_offchain.write(writer)?;
+               self.holder_tx_signed.write(writer)?;
+
+               Ok(())
+       }
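+
+       // Hedged usage sketch (not part of the patch itself): write_for_disk takes any
+       // util::ser::Writer, so a persister can serialize into an in-memory buffer and then
+       // store that buffer atomically. `VecWriter` below is an assumed Vec<u8>-backed
+       // adapter implementing Writer (similar helpers exist only in test/fuzz code):
+       //
+       //     let mut buf = VecWriter(Vec::new());
+       //     monitor.write_for_disk(&mut buf)?;
+       //     // persist buf.0 atomically, keyed by the channel's funding outpoint, before
+       //     // letting the channel advance past the state covered by this serialization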
+}
+
+impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
+       pub(crate) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey,
+                       on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script),
+                       counterparty_htlc_base_key: &PublicKey, counterparty_delayed_payment_base_key: &PublicKey,
+                       on_holder_tx_csv: u16, funding_redeemscript: Script, channel_value_satoshis: u64,
+                       commitment_transaction_number_obscure_factor: u64,
+                       initial_holder_commitment_tx: HolderCommitmentTransaction) -> ChannelMonitor<ChanSigner> {
+
+               assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
+               let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize());
+               let shutdown_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script();
+               let payment_key_hash = WPubkeyHash::hash(&keys.pubkeys().payment_point.serialize());
+               let counterparty_payment_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_key_hash[..]).into_script();
+
+               let counterparty_tx_cache = CounterpartyCommitmentTransaction { counterparty_delayed_payment_base_key: *counterparty_delayed_payment_base_key, counterparty_htlc_base_key: *counterparty_htlc_base_key, on_counterparty_tx_csv, per_htlc: HashMap::new() };
+
+               let mut onchain_tx_handler = OnchainTxHandler::new(destination_script.clone(), keys.clone(), on_holder_tx_csv);
+
+               let holder_tx_sequence = initial_holder_commitment_tx.unsigned_tx.input[0].sequence as u64;
+               let holder_tx_locktime = initial_holder_commitment_tx.unsigned_tx.lock_time as u64;
+               let holder_commitment_tx = HolderSignedTx {
+                       txid: initial_holder_commitment_tx.txid(),
+                       revocation_key: initial_holder_commitment_tx.keys.revocation_key,
+                       a_htlc_key: initial_holder_commitment_tx.keys.broadcaster_htlc_key,
+                       b_htlc_key: initial_holder_commitment_tx.keys.countersignatory_htlc_key,
+                       delayed_payment_key: initial_holder_commitment_tx.keys.broadcaster_delayed_payment_key,
+                       per_commitment_point: initial_holder_commitment_tx.keys.per_commitment_point,
+                       feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw,
+                       htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions
+               };
+               onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
+
+               let mut outputs_to_watch = HashMap::new();
+               outputs_to_watch.insert(funding_info.0.txid, vec![funding_info.1.clone()]);
+
+               ChannelMonitor {
+                       latest_update_id: 0,
+                       commitment_transaction_number_obscure_factor,
+
+                       destination_script: destination_script.clone(),
+                       broadcasted_holder_revokable_script: None,
+                       counterparty_payment_script,
+                       shutdown_script,
+
+                       keys,
+                       funding_info,
+                       current_counterparty_commitment_txid: None,
+                       prev_counterparty_commitment_txid: None,
+
+                       counterparty_tx_cache,
+                       funding_redeemscript,
+                       channel_value_satoshis,
+                       their_cur_revocation_points: None,
+
+                       on_holder_tx_csv,
+
+                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
+                       counterparty_claimable_outpoints: HashMap::new(),
+                       counterparty_commitment_txn_on_chain: HashMap::new(),
+                       counterparty_hash_commitment_number: HashMap::new(),
+
+                       prev_holder_signed_commitment_tx: None,
+                       current_holder_commitment_tx: holder_commitment_tx,
+                       current_counterparty_commitment_number: 1 << 48,
+                       current_holder_commitment_number: 0xffff_ffff_ffff - ((((holder_tx_sequence & 0xffffff) << 3*8) | (holder_tx_locktime as u64 & 0xffffff)) ^ commitment_transaction_number_obscure_factor),
+
+                       payment_preimages: HashMap::new(),
+                       pending_monitor_events: Vec::new(),
+                       pending_events: Vec::new(),
+
+                       onchain_events_waiting_threshold_conf: HashMap::new(),
+                       outputs_to_watch,
+
+                       onchain_tx_handler,
+
+                       lockdown_from_offchain: false,
+                       holder_tx_signed: false,
+
+                       last_block_hash: Default::default(),
+                       secp_ctx: Secp256k1::new(),
+               }
+       }
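+
+       // Illustrative sketch (an assumed free-standing helper, not defined by this patch):
+       // the 48-bit commitment number is obscured across the commitment transaction's
+       // sequence and locktime fields, and is recovered the same way as in `new()` above
+       // and in `check_spend_counterparty_transaction` below:
+       //
+       //     fn unobscure_commitment_number(sequence: u32, lock_time: u32, obscure_factor: u64) -> u64 {
+       //         let obscured = (((sequence as u64) & 0xffffff) << 3*8) | ((lock_time as u64) & 0xffffff);
+       //         0xffff_ffff_ffff - (obscured ^ obscure_factor)
+       //     }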
+
+       /// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
+       /// neither needed by holder commitment transaction HTLCs nor by counterparty ones. Once we have
+       /// seen the counterparty commitment transaction's secret, such preimages are de facto pruned
+       /// (we can use the revocation key instead).
+       fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
+               if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
+                       return Err(MonitorUpdateError("Previous secret did not match new one"));
+               }
+
+               // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
+               // events for now-revoked/fulfilled HTLCs.
+               if let Some(txid) = self.prev_counterparty_commitment_txid.take() {
+                       for &mut (_, ref mut source) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
+                               *source = None;
+                       }
+               }
+
+               if !self.payment_preimages.is_empty() {
+                       let cur_holder_signed_commitment_tx = &self.current_holder_commitment_tx;
+                       let prev_holder_signed_commitment_tx = self.prev_holder_signed_commitment_tx.as_ref();
+                       let min_idx = self.get_min_seen_secret();
+                       let counterparty_hash_commitment_number = &mut self.counterparty_hash_commitment_number;
+
+                       self.payment_preimages.retain(|&k, _| {
+                               for &(ref htlc, _, _) in cur_holder_signed_commitment_tx.htlc_outputs.iter() {
+                                       if k == htlc.payment_hash {
+                                               return true
+                                       }
+                               }
+                               if let Some(prev_holder_commitment_tx) = prev_holder_signed_commitment_tx {
+                                       for &(ref htlc, _, _) in prev_holder_commitment_tx.htlc_outputs.iter() {
+                                               if k == htlc.payment_hash {
+                                                       return true
+                                               }
+                                       }
+                               }
+                               let contains = if let Some(cn) = counterparty_hash_commitment_number.get(&k) {
+                                       if *cn < min_idx {
+                                               return true
+                                       }
+                                       true
+                               } else { false };
+                               if contains {
+                                       counterparty_hash_commitment_number.remove(&k);
+                               }
+                               false
+                       });
+               }
+
+               Ok(())
+       }
+
+       /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
+       /// The monitor watches for it to be broadcasted and then uses the HTLC information (and
+       /// possibly future revocation/preimage information) to claim outputs where possible.
+       /// We also cache the payment_hash-to-commitment-number mapping to ease pruning of old preimages by watchtowers.
+       pub(crate) fn provide_latest_counterparty_commitment_tx_info<L: Deref>(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger {
+               // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
+               // so that a remote monitor doesn't learn anything unless there is a malicious close.
+               // (only maybe, sadly we can't do the same for local info, as we need to be aware of
+               // timeouts)
+               for &(ref htlc, _) in &htlc_outputs {
+                       self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
+               }
+
+               let new_txid = unsigned_commitment_tx.txid();
+               log_trace!(logger, "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len());
+               log_trace!(logger, "New potential counterparty commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx));
+               self.prev_counterparty_commitment_txid = self.current_counterparty_commitment_txid.take();
+               self.current_counterparty_commitment_txid = Some(new_txid);
+               self.counterparty_claimable_outpoints.insert(new_txid, htlc_outputs.clone());
+               self.current_counterparty_commitment_number = commitment_number;
+               //TODO: Merge this into the other per-counterparty-transaction output storage stuff
+               match self.their_cur_revocation_points {
+                       Some(old_points) => {
+                               if old_points.0 == commitment_number + 1 {
+                                       self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
+                               } else if old_points.0 == commitment_number + 2 {
+                                       if let Some(old_second_point) = old_points.2 {
+                                               self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
+                                       } else {
+                                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                                       }
+                               } else {
+                                       self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                               }
+                       },
+                       None => {
+                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+                       }
+               }
+               let mut htlcs = Vec::with_capacity(htlc_outputs.len());
+               for htlc in htlc_outputs {
+                       if htlc.0.transaction_output_index.is_some() {
+                               htlcs.push(htlc.0);
+                       }
+               }
+               self.counterparty_tx_cache.per_htlc.insert(new_txid, htlcs);
+       }
+
+       /// Informs this monitor of the latest holder (ie broadcastable) commitment transaction. The
+       /// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
+       /// is important that any clones of this channel monitor (including remote clones) be kept
+       /// up-to-date as our holder commitment transaction is updated.
+       /// Panics if set_on_holder_tx_csv has never been called.
+       fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
+               let txid = commitment_tx.txid();
+               let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
+               let locktime = commitment_tx.unsigned_tx.lock_time as u64;
+               let mut new_holder_commitment_tx = HolderSignedTx {
+                       txid,
+                       revocation_key: commitment_tx.keys.revocation_key,
+                       a_htlc_key: commitment_tx.keys.broadcaster_htlc_key,
+                       b_htlc_key: commitment_tx.keys.countersignatory_htlc_key,
+                       delayed_payment_key: commitment_tx.keys.broadcaster_delayed_payment_key,
+                       per_commitment_point: commitment_tx.keys.per_commitment_point,
+                       feerate_per_kw: commitment_tx.feerate_per_kw,
+                       htlc_outputs,
+               };
+               self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
+               self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+               mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
+               self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
+               if self.holder_tx_signed {
+                       return Err(MonitorUpdateError("Latest holder commitment transaction has already been signed, update is rejected"));
+               }
+               Ok(())
+       }
+
+       /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
+       /// commitment_tx_infos which contain the payment hash have been revoked.
+       pub(crate) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
+               self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+       }
+
+       pub(crate) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
+               where B::Target: BroadcasterInterface,
+                                       L::Target: Logger,
+       {
+               for tx in self.get_latest_holder_commitment_txn(logger).iter() {
+                       broadcaster.broadcast_transaction(tx);
+               }
+               self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+       }
+
+       /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
+       /// itself.
+       ///
+       /// Panics if the given update is not the next update by update_id.
+       pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
+               where B::Target: BroadcasterInterface,
+                                       L::Target: Logger,
+       {
+               if self.latest_update_id + 1 != updates.update_id {
+                       panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
+               }
+               for update in updates.updates.drain(..) {
+                       match update {
+                               ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
+                                       if self.lockdown_from_offchain { panic!(); }
+                                       self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)?
+                               },
+                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
+                               ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+                                       self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+                               ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+                                       self.provide_secret(idx, secret)?,
+                               ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+                                       self.lockdown_from_offchain = true;
+                                       if should_broadcast {
+                                               self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
+                                       } else {
+                                               log_error!(logger, "You have a toxic holder commitment transaction available in channel monitor, see the comment on ChannelMonitor::get_latest_holder_commitment_txn for the manual action to take");
+                                       }
+                               }
+                       }
+               }
+               self.latest_update_id = updates.update_id;
+               Ok(())
+       }
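+
+       // Hedged usage sketch (caller side; `monitor`, `update`, `broadcaster` and `logger`
+       // are assumed bindings, not defined here): because update_monitor panics on
+       // out-of-order updates, a persister replaying stored updates should check ordering
+       // against get_latest_update_id() first:
+       //
+       //     if monitor.get_latest_update_id() + 1 == update.update_id {
+       //         monitor.update_monitor(update, &broadcaster, &logger)?;
+       //     } else {
+       //         // queue the update and replay strictly in update_id order
+       //     }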
+
+       /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
+       /// ChannelMonitor.
+       pub fn get_latest_update_id(&self) -> u64 {
+               self.latest_update_id
+       }
+
+       /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
+       pub fn get_funding_txo(&self) -> &(OutPoint, Script) {
+               &self.funding_info
+       }
+
+       /// Gets a list of txids, with their output scripts (in the order they appear in the
+       /// transaction), which we must learn about spends of via block_connected().
+       ///
+       /// (C-not exported) because we have no HashMap bindings
+       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<Script>> {
+               &self.outputs_to_watch
+       }
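+
+       // Minimal sketch (assumes a caller-side rust-bitcoin `block: &Block` and a `monitor`
+       // binding): a block source can use get_outputs_to_watch() to pre-filter which
+       // confirmed transactions are relevant to this monitor before passing them along in
+       // its block-connected handling:
+       //
+       //     let watched = monitor.get_outputs_to_watch();
+       //     let relevant: Vec<_> = block.txdata.iter().filter(|tx| {
+       //         watched.contains_key(&tx.txid()) ||
+       //             tx.input.iter().any(|input| watched.contains_key(&input.previous_output.txid))
+       //     }).collect();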
+
+       /// Gets the set of all outpoints which this ChannelMonitor expects to hear about spends of.
+       /// Generally useful when deserializing as during normal operation the return values of
+       /// block_connected are sufficient to ensure all relevant outpoints are being monitored (note
+       /// that the get_funding_txo outpoint and transaction must also be monitored for!).
+       ///
+       /// (C-not exported) as there is no practical way to track lifetimes of returned values.
+       pub fn get_monitored_outpoints(&self) -> Vec<(Txid, u32, &Script)> {
+               let mut res = Vec::with_capacity(self.counterparty_commitment_txn_on_chain.len() * 2);
+               for (ref txid, &(_, ref outputs)) in self.counterparty_commitment_txn_on_chain.iter() {
+                       for (idx, output) in outputs.iter().enumerate() {
+                               res.push(((*txid).clone(), idx as u32, output));
+                       }
+               }
+               res
+       }
+
+       /// Gets the list of HTLCs whose status has been updated on chain. This should be called by
+       /// ChannelManager via [`chain::Watch::release_pending_monitor_events`].
+       ///
+       /// [`chain::Watch::release_pending_monitor_events`]: ../trait.Watch.html#tymethod.release_pending_monitor_events
+       pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
+               let mut ret = Vec::new();
+               mem::swap(&mut ret, &mut self.pending_monitor_events);
+               ret
+       }
+
+       /// Gets the list of pending events which were generated by previous actions, clearing the list
+       /// in the process.
+       ///
+       /// This is called by ChainMonitor::get_and_clear_pending_events() and is equivalent to
+       /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
+       /// no internal locking in ChannelMonitors.
+       pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
+               let mut ret = Vec::new();
+               mem::swap(&mut ret, &mut self.pending_events);
+               ret
+       }
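+
+       // Hedged sketch (event-loop caller side; `monitor` and `handle_event` are assumed
+       // bindings): both queues are drained by the owner rather than by the monitor itself,
+       // with monitor events forwarded towards the ChannelManager and user events towards
+       // the application:
+       //
+       //     for monitor_event in monitor.get_and_clear_pending_monitor_events() {
+       //         // forward to the ChannelManager (normally via chain::Watch)
+       //     }
+       //     for event in monitor.get_and_clear_pending_events() {
+       //         handle_event(event);
+       //     }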
+
+       /// Can only fail if idx is < get_min_seen_secret
+       fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+               self.commitment_secrets.get_secret(idx)
+       }
+
+       pub(crate) fn get_min_seen_secret(&self) -> u64 {
+               self.commitment_secrets.get_min_seen_secret()
+       }
+
+       pub(crate) fn get_cur_counterparty_commitment_number(&self) -> u64 {
+               self.current_counterparty_commitment_number
+       }
+
+       pub(crate) fn get_cur_holder_commitment_number(&self) -> u64 {
+               self.current_holder_commitment_number
+       }
+
+       /// Attempts to claim a counterparty commitment transaction's outputs using the revocation key and
+       /// data in counterparty_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
+       /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
+       /// HTLC-Success/HTLC-Timeout transactions.
+       /// Returns updates for HTLCs pending in the channel which were failed automatically by the
+       /// broadcast of a revoked counterparty commitment tx.
+       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+               // Most secp and related errors trying to create keys mean we have no hope of constructing
+               // a spend transaction...so we return no transactions to broadcast
+               let mut claimable_outpoints = Vec::new();
+               let mut watch_outputs = Vec::new();
+
+               let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
+               let per_commitment_option = self.counterparty_claimable_outpoints.get(&commitment_txid);
+
+               macro_rules! ignore_error {
+                       ( $thing : expr ) => {
+                               match $thing {
+                                       Ok(a) => a,
+                                       Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs))
+                               }
+                       };
+               }
+
+               let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+               if commitment_number >= self.get_min_seen_secret() {
+                       let secret = self.get_secret(commitment_number).unwrap();
+                       let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
+                       let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+                       let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.keys.pubkeys().revocation_basepoint));
+                       let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.counterparty_tx_cache.counterparty_delayed_payment_base_key));
+
+                       let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.counterparty_tx_cache.on_counterparty_tx_csv, &delayed_key);
+                       let revokeable_p2wsh = revokeable_redeemscript.to_v0_p2wsh();
+
+                       // First, process non-htlc outputs (to_holder & to_counterparty)
+                       for (idx, outp) in tx.output.iter().enumerate() {
+                               if outp.script_pubkey == revokeable_p2wsh {
+                                       let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: outp.value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
+                                       claimable_outpoints.push(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 }, witness_data});
+                               }
+                       }
+
+                       // Then, try to find revoked htlc outputs
+                       if let Some(ref per_commitment_data) = per_commitment_option {
+                               for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+                                       if let Some(transaction_output_index) = htlc.transaction_output_index {
+                                               if transaction_output_index as usize >= tx.output.len() ||
+                                                               tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+                                                       return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+                                               }
+                                               let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC }, amount: tx.output[transaction_output_index as usize].value, htlc: Some(htlc.clone()), on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
+                                               claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+                                       }
+                               }
+                       }
+
+                       // Last, track the revoked commitment transaction on-chain and fail backward outgoing HTLCs, as the payment path is broken
+                       if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
+                               // We're definitely a counterparty commitment transaction!
+                               log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
+                               watch_outputs.append(&mut tx.output.clone());
+                               self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+
+                               macro_rules! check_htlc_fails {
+                                       ($txid: expr, $commitment_tx: expr) => {
+                                               if let Some(ref outpoints) = self.counterparty_claimable_outpoints.get($txid) {
+                                                       for &(ref htlc, ref source_option) in outpoints.iter() {
+                                                               if let &Some(ref source) = source_option {
+                                                                       log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                                                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                                                                               hash_map::Entry::Occupied(mut entry) => {
+                                                                                       let e = entry.get_mut();
+                                                                                       e.retain(|ref event| {
+                                                                                               match **event {
+                                                                                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                                                                                               return htlc_update.0 != **source
+                                                                                                       },
+                                                                                                       _ => true
+                                                                                               }
+                                                                                       });
+                                                                                       e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
+                                                                               }
+                                                                               hash_map::Entry::Vacant(entry) => {
+                                                                                       entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
+                                                                               }
+                                                                       }
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                               if let Some(ref txid) = self.current_counterparty_commitment_txid {
+                                       check_htlc_fails!(txid, "current");
+                               }
+                               if let Some(ref txid) = self.prev_counterparty_commitment_txid {
+                                       check_htlc_fails!(txid, "previous");
+                               }
+                               // No need to check holder commitment txn, symmetric HTLCSource must be present as per-htlc data on counterparty commitment tx
+                       }
+               } else if let Some(per_commitment_data) = per_commitment_option {
+                       // While this isn't useful yet, there is a potential race where if a counterparty
+                       // revokes a state at the same time as the commitment transaction for that state is
+                       // confirmed, and the watchtower receives the block before the user, the user could
+                       // upload a new ChannelMonitor with the revocation secret but the watchtower has
+                       // already processed the block, resulting in the counterparty_commitment_txn_on_chain entry
+                       // not being generated by the above conditional. Thus, to be safe, we go ahead and
+                       // insert it here.
+                       watch_outputs.append(&mut tx.output.clone());
+                       self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+
+                       log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
+
+                       macro_rules! check_htlc_fails {
+                               ($txid: expr, $commitment_tx: expr, $id: tt) => {
+                                       if let Some(ref latest_outpoints) = self.counterparty_claimable_outpoints.get($txid) {
+                                               $id: for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+                                                       if let &Some(ref source) = source_option {
+                                                               // Check if the HTLC is present in the commitment transaction that was
+                                                               // broadcast, but not if it was below the dust limit, which we should
+                                                               // fail backwards immediately as there is no way for us to learn the
+                                                               // payment_preimage.
+                                                               // Note that if the dust limit were allowed to change between
+                                                               // commitment transactions we'd want to check whether *any*
+                                                               // broadcastable commitment transaction has the HTLC in it, but it
+                                                               // cannot currently change after channel initialization, so we don't
+                                                               // need to here.
+                                                               for &(ref broadcast_htlc, ref broadcast_source) in per_commitment_data.iter() {
+                                                                       if broadcast_htlc.transaction_output_index.is_some() && Some(source) == broadcast_source.as_ref() {
+                                                                               continue $id;
+                                                                       }
+                                                               }
+                                                               log_trace!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of counterparty commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
+                                                               match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                                                                       hash_map::Entry::Occupied(mut entry) => {
+                                                                               let e = entry.get_mut();
+                                                                               e.retain(|ref event| {
+                                                                                       match **event {
+                                                                                               OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                                                                                       return htlc_update.0 != **source
+                                                                                               },
+                                                                                               _ => true
+                                                                                       }
+                                                                               });
+                                                                               e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
+                                                                       }
+                                                                       hash_map::Entry::Vacant(entry) => {
+                                                                               entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
+                                                                       }
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       if let Some(ref txid) = self.current_counterparty_commitment_txid {
+                               check_htlc_fails!(txid, "current", 'current_loop);
+                       }
+                       if let Some(ref txid) = self.prev_counterparty_commitment_txid {
+                               check_htlc_fails!(txid, "previous", 'prev_loop);
+                       }
+
+                       if let Some(revocation_points) = self.their_cur_revocation_points {
+                               let revocation_point_option =
+                                       if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
+                                       else if let Some(point) = revocation_points.2.as_ref() {
+                                               if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
+                                       } else { None };
+                               if let Some(revocation_point) = revocation_point_option {
+                                       self.counterparty_payment_script = {
+                                               // Note that the Network here is ignored as we immediately drop the address for the
+                                               // script_pubkey version
+                                               let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize());
+                                               Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script()
+                                       };
+
+                                       // Then, try to find htlc outputs
+                                       for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+                                               if let Some(transaction_output_index) = htlc.transaction_output_index {
+                                                       if transaction_output_index as usize >= tx.output.len() ||
+                                                                       tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
+                                                               return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, fuck this user
+                                                       }
+                                                       let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
+                                                       let aggregable = htlc.offered;
+                                                       if preimage.is_some() || !htlc.offered {
+                                                               let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() };
+                                                               claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+               }
+               (claimable_outpoints, (commitment_txid, watch_outputs))
+       }
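+
+       // Hedged note with a tiny sketch (reusing the assumed unobscure_commitment_number
+       // helper sketched above): a counterparty commitment transaction is treated as revoked
+       // when its recovered commitment number is >= the newest (lowest-indexed) secret we
+       // have seen, since per-commitment secrets are handed over counting down from 2^48 - 1:
+       //
+       //     let n = unobscure_commitment_number(tx.input[0].sequence, tx.lock_time, obscure_factor);
+       //     let is_revoked = n >= monitor.get_min_seen_secret();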
+
+       /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
+       fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<TxOut>)>) where L::Target: Logger {
+               let htlc_txid = tx.txid();
+               if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
+                       return (Vec::new(), None)
+               }
+
+               macro_rules! ignore_error {
+                       ( $thing : expr ) => {
+                               match $thing {
+                                       Ok(a) => a,
+                                       Err(_) => return (Vec::new(), None)
+                               }
+                       };
+               }
+
+               let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); };
+               let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
+               let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+
+               log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
+               let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key,  per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv };
+               let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
+               (claimable_outpoints, Some((htlc_txid, tx.output.clone())))
+       }
+
+       fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<TxOut>, Option<(Script, PublicKey, PublicKey)>) {
+               let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
+               let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
+
+               let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key);
+               let broadcasted_holder_revokable_script = Some((redeemscript.to_v0_p2wsh(), holder_tx.per_commitment_point.clone(), holder_tx.revocation_key.clone()));
+
+               for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
+                       if let Some(transaction_output_index) = htlc.transaction_output_index {
+                               claim_requests.push(ClaimRequest { absolute_timelock: ::std::u32::MAX, aggregable: false, outpoint: BitcoinOutPoint { txid: holder_tx.txid, vout: transaction_output_index as u32 },
+                                       witness_data: InputMaterial::HolderHTLC {
+                                               preimage: if !htlc.offered {
+                                                               if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+                                                                       Some(preimage.clone())
+                                                               } else {
+                                                                       // We can't build an HTLC-Success transaction without the preimage
+                                                                       continue;
+                                                               }
+                                                       } else { None },
+                                               amount: htlc.amount_msat,
+                               }});
+                               watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone());
+                       }
+               }
+
+               (claim_requests, watch_outputs, broadcasted_holder_revokable_script)
+       }
+
+       /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
+       /// revoked, using data in holder_claimable_outpoints.
+       /// Should not be used if check_spend_revoked_transaction succeeds.
+       fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
+               let commitment_txid = tx.txid();
+               let mut claim_requests = Vec::new();
+               let mut watch_outputs = Vec::new();
+
+               macro_rules! wait_threshold_conf {
+                       ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
+                               log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting for confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                               match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
+                                       hash_map::Entry::Occupied(mut entry) => {
+                                               let e = entry.get_mut();
+                                               e.retain(|ref event| {
+                                                       match **event {
+                                                               OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                                                       return htlc_update.0 != $source
+                                                               },
+                                                               _ => true
+                                                       }
+                                               });
+                                               e.push(OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)});
+                                       }
+                                       hash_map::Entry::Vacant(entry) => {
+                                               entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)}]);
+                                       }
+                               }
+                       }
+               }
+
+               macro_rules! append_onchain_update {
+                       ($updates: expr) => {
+                               claim_requests = $updates.0;
+                               watch_outputs.append(&mut $updates.1);
+                               self.broadcasted_holder_revokable_script = $updates.2;
+                       }
+               }
+
+               // The HTLC set may differ between the latest and previous holder commitment txn; in case one of them hits the chain, ensure we cancel all HTLCs backward
+               let mut is_holder_tx = false;
+
+               if self.current_holder_commitment_tx.txid == commitment_txid {
+                       is_holder_tx = true;
+                       log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
+                       let mut res = self.broadcast_by_holder_state(tx, &self.current_holder_commitment_tx);
+                       append_onchain_update!(res);
+               } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
+                       if holder_tx.txid == commitment_txid {
+                               is_holder_tx = true;
+                               log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
+                               let mut res = self.broadcast_by_holder_state(tx, holder_tx);
+                               append_onchain_update!(res);
+                       }
+               }
+
+               macro_rules! fail_dust_htlcs_after_threshold_conf {
+                       ($holder_tx: expr) => {
+                               for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs {
+                                       if htlc.transaction_output_index.is_none() {
+                                               if let &Some(ref source) = source {
+                                                       wait_threshold_conf!(height, source.clone(), "latest", htlc.payment_hash.clone());
+                                               }
+                                       }
+                               }
+                       }
+               }
+
+               if is_holder_tx {
+                       fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx);
+                       if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
+                               fail_dust_htlcs_after_threshold_conf!(holder_tx);
+                       }
+               }
+
+               (claim_requests, (commitment_txid, watch_outputs))
+       }
+
+       /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
+       /// the Channel was out-of-date. You may also use this to get a broadcastable holder toxic tx if you
+       /// have fallen behind, i.e. when receiving a channel_reestablish with a proof that our counterparty
+       /// side knows a higher revocation secret than the holder commitment number we are aware of.
+       /// Broadcasting these transactions is UNSAFE, as they allow the counterparty to punish you.
+       /// Nevertheless, you may want to broadcast them if the counterparty doesn't close the channel with
+       /// their higher commitment transaction after a substantial amount of time (a month or even a year)
+       /// in order to get your funds back. It may be best to contact the other node operator out-of-band
+       /// to coordinate, if that option is available to you. In any case, the choice is up to the user.
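+       ///
+       /// A minimal usage sketch (assuming `monitor`, `logger` and a `broadcaster` implementing
+       /// `BroadcasterInterface` exist in the caller's context):
+       ///
+       /// ```ignore
+       /// // Only do this in the fallen-behind case described above; broadcasting is otherwise unsafe.
+       /// for tx in monitor.get_latest_holder_commitment_txn(&logger) {
+       ///     broadcaster.broadcast_transaction(&tx);
+       /// }
+       /// ```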
+       pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+               log_trace!(logger, "Getting signed latest holder commitment transaction!");
+               self.holder_tx_signed = true;
+               if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
+                       let txid = commitment_tx.txid();
+                       let mut res = vec![commitment_tx];
+                       for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
+                               if let Some(vout) = htlc.0.transaction_output_index {
+                                       let preimage = if !htlc.0.offered {
+                                                       if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
+                                                               // We can't build an HTLC-Success transaction without the preimage
+                                                               continue;
+                                                       }
+                                               } else { None };
+                                       if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
+                                                       &::bitcoin::OutPoint { txid, vout }, &preimage) {
+                                               res.push(htlc_tx);
+                                       }
+                               }
+                       }
+                       // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
+                       // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation.
+                       return res
+               }
+               Vec::new()
+       }
+
+       /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
+       /// to bypass the HolderCommitmentTransaction state update lockdown after signing and to generate a
+       /// revoked commitment transaction.
+       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
+       pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
+               log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
+               if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript) {
+                       let txid = commitment_tx.txid();
+                       let mut res = vec![commitment_tx];
+                       for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
+                               if let Some(vout) = htlc.0.transaction_output_index {
+                                       let preimage = if !htlc.0.offered {
+                                                       if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
+                                                               // We can't build an HTLC-Success transaction without the preimage
+                                                               continue;
+                                                       }
+                                               } else { None };
+                                       if let Some(htlc_tx) = self.onchain_tx_handler.unsafe_get_fully_signed_htlc_tx(
+                                                       &::bitcoin::OutPoint { txid, vout }, &preimage) {
+                                               res.push(htlc_tx);
+                                       }
+                               }
+                       }
+                       return res
+               }
+               Vec::new()
+       }
+
+       /// Processes transactions in a newly connected block, which may result in any of the following:
+       /// - updating the monitor's state against resolved HTLCs
+       /// - punishing the counterparty in the case of seeing a revoked commitment transaction
+       /// - force-closing the channel and claiming/timing out incoming/outgoing HTLCs if near expiration
+       /// - detecting settled outputs for later spending
+       /// - scheduling and bumping any in-flight claims
+       ///
+       /// Returns any new outputs to watch from `txdata`; after this call, these are also included in
+       /// [`get_outputs_to_watch`].
+       ///
+       /// [`get_outputs_to_watch`]: #method.get_outputs_to_watch
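+       ///
+       /// A rough usage sketch (assuming `monitor`, `block`, `height`, `broadcaster`, `fee_estimator`
+       /// and `logger` exist in the caller's context):
+       ///
+       /// ```ignore
+       /// let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+       /// let watch = monitor.block_connected(&block.header, &txdata, height, &broadcaster, &fee_estimator, &logger);
+       /// for (txid, outputs) in watch {
+       ///     // Register each output with the chain source so spends are detected in later blocks.
+       /// }
+       /// ```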
+       pub fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
+               where B::Target: BroadcasterInterface,
+                     F::Target: FeeEstimator,
+                                       L::Target: Logger,
+       {
+               let txn_matched = self.filter_block(txdata);
+               for tx in &txn_matched {
+                       let mut output_val = 0;
+                       for out in tx.output.iter() {
+                               if out.value > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
+                               output_val += out.value;
+                               if output_val > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
+                       }
+               }
+
+               let block_hash = header.block_hash();
+               log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
+
+               let mut watch_outputs = Vec::new();
+               let mut claimable_outpoints = Vec::new();
+               for tx in &txn_matched {
+                       if tx.input.len() == 1 {
+                               // Assuming our keys were not leaked (in which case we're screwed no matter what),
+                               // commitment transactions and HTLC transactions will all only ever have one input,
+                               // which is an easy way to filter out any potential non-matching txn for lazy
+                               // filters.
+                               let prevout = &tx.input[0].previous_output;
+                               if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
+                                       if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
+                                               let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger);
+                                               if !new_outputs.1.is_empty() {
+                                                       watch_outputs.push(new_outputs);
+                                               }
+                                               if new_outpoints.is_empty() {
+                                                       let (mut new_outpoints, new_outputs) = self.check_spend_holder_transaction(&tx, height, &logger);
+                                                       if !new_outputs.1.is_empty() {
+                                                               watch_outputs.push(new_outputs);
+                                                       }
+                                                       claimable_outpoints.append(&mut new_outpoints);
+                                               }
+                                               claimable_outpoints.append(&mut new_outpoints);
+                                       }
+                               } else {
+                                       if let Some(&(commitment_number, _)) = self.counterparty_commitment_txn_on_chain.get(&prevout.txid) {
+                                               let (mut new_outpoints, new_outputs_option) = self.check_spend_counterparty_htlc(&tx, commitment_number, height, &logger);
+                                               claimable_outpoints.append(&mut new_outpoints);
+                                               if let Some(new_outputs) = new_outputs_option {
+                                                       watch_outputs.push(new_outputs);
+                                               }
+                                       }
+                               }
+                       }
+                       // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
+                       // can also be resolved in a few other ways which can have more than one output. Thus,
+                       // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
+                       self.is_resolving_htlc_output(&tx, height, &logger);
+
+                       self.is_paying_spendable_output(&tx, height, &logger);
+               }
+               let should_broadcast = self.would_broadcast_at_height(height, &logger);
+               if should_broadcast {
+                       claimable_outpoints.push(ClaimRequest { absolute_timelock: height, aggregable: false, outpoint: BitcoinOutPoint { txid: self.funding_info.0.txid.clone(), vout: self.funding_info.0.index as u32 }, witness_data: InputMaterial::Funding { funding_redeemscript: self.funding_redeemscript.clone() }});
+               }
+               if should_broadcast {
+                       self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
+                       if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
+                               self.holder_tx_signed = true;
+                               let (mut new_outpoints, new_outputs, _) = self.broadcast_by_holder_state(&commitment_tx, &self.current_holder_commitment_tx);
+                               if !new_outputs.is_empty() {
+                                       watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
+                               }
+                               claimable_outpoints.append(&mut new_outpoints);
+                       }
+               }
+               if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
+                       for ev in events {
+                               match ev {
+                                       OnchainEvent::HTLCUpdate { htlc_update } => {
+                                               log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
+                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+                                                       payment_hash: htlc_update.1,
+                                                       payment_preimage: None,
+                                                       source: htlc_update.0,
+                                               }));
+                                       },
+                                       OnchainEvent::MaturingOutput { descriptor } => {
+                                               log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
+                                               self.pending_events.push(Event::SpendableOutputs {
+                                                       outputs: vec![descriptor]
+                                               });
+                                       }
+                               }
+                       }
+               }
+
+               self.onchain_tx_handler.block_connected(&txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
+               self.last_block_hash = block_hash;
+
+               // Determine new outputs to watch by comparing against previously known outputs to watch,
+               // updating the latter in the process.
+               watch_outputs.retain(|&(ref txid, ref txouts)| {
+                       let output_scripts = txouts.iter().map(|o| o.script_pubkey.clone()).collect();
+                       self.outputs_to_watch.insert(txid.clone(), output_scripts).is_none()
+               });
+               watch_outputs
+       }
+
+       /// Determines if the disconnected block contained any transactions of interest and updates
+       /// the monitor's state accordingly.
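+       ///
+       /// A rough usage sketch for reorg handling (assuming `monitor`, `stale_header`, `height`,
+       /// `broadcaster`, `fee_estimator` and `logger` exist in the caller's context):
+       ///
+       /// ```ignore
+       /// // Unwind the stale tip before connecting blocks from the new best chain.
+       /// monitor.block_disconnected(&stale_header, height, &broadcaster, &fee_estimator, &logger);
+       /// ```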
+       pub fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, header: &BlockHeader, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+               where B::Target: BroadcasterInterface,
+                     F::Target: FeeEstimator,
+                     L::Target: Logger,
+       {
+               let block_hash = header.block_hash();
+               log_trace!(logger, "Block {} at height {} disconnected", block_hash, height);
+
+               if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
+                       //We may discard:
+                       //- any htlc update whose failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
+                       //- any maturing spendable output whose paying transaction has been disconnected
+               }
+
+               self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger);
+
+               self.last_block_hash = block_hash;
+       }
+
+       /// Filters a block's `txdata` for transactions spending watched outputs or for any child
+       /// transactions thereof.
+       fn filter_block<'a>(&self, txdata: &TransactionData<'a>) -> Vec<&'a Transaction> {
+               let mut matched_txn = HashSet::new();
+               txdata.iter().filter(|&&(_, tx)| {
+                       let mut matches = self.spends_watched_output(tx);
+                       for input in tx.input.iter() {
+                               if matches { break; }
+                               if matched_txn.contains(&input.previous_output.txid) {
+                                       matches = true;
+                               }
+                       }
+                       if matches {
+                               matched_txn.insert(tx.txid());
+                       }
+                       matches
+               }).map(|(_, tx)| *tx).collect()
+       }
+
+       /// Checks if a given transaction spends any watched outputs.
+       fn spends_watched_output(&self, tx: &Transaction) -> bool {
+               for input in tx.input.iter() {
+                       if let Some(outputs) = self.get_outputs_to_watch().get(&input.previous_output.txid) {
+                               for (idx, _script_pubkey) in outputs.iter().enumerate() {
+                                       if idx == input.previous_output.vout as usize {
+                                               return true;
+                                       }
+                               }
+                       }
+               }
+
+               false
+       }
+
+       fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger {
+               // We need to consider all HTLCs which are:
+               //  * in any unrevoked counterparty commitment transaction, as they could broadcast said
+               //    transactions and we'd end up in a race, or
+               //  * in our latest holder commitment transaction, as this is the thing we will
+               //    broadcast if we go on-chain.
+               // Note that we consider HTLCs which were below the dust threshold here - while they don't
+               // strictly imply that we need to fail the channel, we need to go ahead and fail them back
+               // to the source, and if we don't fail the channel we will have to ensure that the next
+               // updates that peer sends us are update_fails, failing the channel if not. It's probably
+               // easier to just fail the channel as this case should be rare enough anyway.
+               macro_rules! scan_commitment {
+                       ($htlcs: expr, $holder_tx: expr) => {
+                               for ref htlc in $htlcs {
+                                       // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+                                       // chain with enough room to claim the HTLC without our counterparty being able to
+                                       // time out the HTLC first.
+                                       // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+                                       // concern is being able to claim the corresponding inbound HTLC (on another
+                                       // channel) before it expires. In fact, we don't even really care if our
+                                       // counterparty here claims such an outbound HTLC after it expired as long as we
+                                       // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+                                       // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+                                       // we give ourselves a few blocks of headroom after expiration before going
+                                       // on-chain for an expired HTLC.
+                                       // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+                                       // from us until we've reached the point where we go on-chain with the
+                                       // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+                                       // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+                                       //  aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
+                                       //      inbound_cltv == height + CLTV_CLAIM_BUFFER
+                                       //      outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+                                       //      CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
+                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+                                       //  The final, above, condition is checked for statically in channelmanager
+                                       //  with CHECK_CLTV_EXPIRY_SANITY_2.
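+                                       //  As a purely illustrative instantiation (the actual constant values may differ):
+                                       //  with LATENCY_GRACE_PERIOD_BLOCKS = 3 and CLTV_CLAIM_BUFFER = 6, the inbound
+                                       //  HTLC's cltv_expiry must exceed the outbound one's by at least 3 + 2*6 = 15
+                                       //  blocks for the final inequality above to hold.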
+                                       let htlc_outbound = $holder_tx == htlc.offered;
+                                       if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+                                          (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+                                               log_info!(logger, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
+                                               return true;
+                                       }
+                               }
+                       }
+               }
+
+               scan_commitment!(self.current_holder_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
+
+               if let Some(ref txid) = self.current_counterparty_commitment_txid {
+                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
+                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+                       }
+               }
+               if let Some(ref txid) = self.prev_counterparty_commitment_txid {
+                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
+                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+                       }
+               }
+
+               false
+       }
+
+       /// Checks if any broadcast transaction resolves an HTLC output via a success or timeout on a holder
+       /// or counterparty commitment tx. If so, sends back the source, the preimage (if found), and the
+       /// payment_hash of the resolved HTLC.
+       fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+               'outer_loop: for input in &tx.input {
+                       let mut payment_data = None;
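+                       // The witness-shape checks below are a readability note on the BOLT 3 script layouts
+                       // (the authoritative scripts are built in chan_utils):
+                       //  * a revocation claim of an offered or accepted HTLC uses a 3-element witness whose
+                       //    second element is a 33-byte revocation pubkey,
+                       //  * an HTLC-Success spend of an accepted HTLC uses a 5-element witness (preimage at
+                       //    index 3),
+                       //  * a preimage claim of an offered HTLC uses a 3-element witness (preimage at index 1).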
+                       let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
+                               || (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33);
+                       let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC);
+                       let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC);
+
+                       macro_rules! log_claim {
+                               ($tx_info: expr, $holder_tx: expr, $htlc: expr, $source_avail: expr) => {
+                                       // We found the output in question, but aren't failing it backwards
+                                       // as we have no corresponding source and no valid counterparty commitment txid
+                                       // to try a weak source binding with a same-hash, same-value, still-valid offered HTLC.
+                                       // This implies it is either an inbound HTLC or an outbound HTLC on a revoked transaction.
+                                       let outbound_htlc = $holder_tx == $htlc.offered;
+                                       if ($holder_tx && revocation_sig_claim) ||
+                                                       (outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
+                                               log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
+                                                       $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+                                                       if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+                                                       if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
+                                       } else {
+                                               log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
+                                                       $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+                                                       if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+                                                       if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" });
+                                       }
+                               }
+                       }
+
+                       macro_rules! check_htlc_valid_counterparty {
+                               ($counterparty_txid: expr, $htlc_output: expr) => {
+                                       if let Some(txid) = $counterparty_txid {
+                                               for &(ref pending_htlc, ref pending_source) in self.counterparty_claimable_outpoints.get(&txid).unwrap() {
+                                                       if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
+                                                               if let &Some(ref source) = pending_source {
+                                                                       log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
+                                                                       payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+                                                                       break;
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+
+                       macro_rules! scan_commitment {
+                               ($htlcs: expr, $tx_info: expr, $holder_tx: expr) => {
+                                       for (ref htlc_output, source_option) in $htlcs {
+                                               if Some(input.previous_output.vout) == htlc_output.transaction_output_index {
+                                                       if let Some(ref source) = source_option {
+                                                               log_claim!($tx_info, $holder_tx, htlc_output, true);
+                                                               // We have a resolution of an HTLC either from one of our latest
+                                                               // holder commitment transactions or an unrevoked counterparty commitment
+                                                               // transaction. This implies we either learned a preimage, the HTLC
+                                                               // has timed out, or we screwed up. In any case, we should now
+                                                               // resolve the source HTLC with the original sender.
+                                                               payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+                                                       } else if !$holder_tx {
+                                                                       check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
+                                                               if payment_data.is_none() {
+                                                                       check_htlc_valid_counterparty!(self.prev_counterparty_commitment_txid, htlc_output);
+                                                               }
+                                                       }
+                                                       if payment_data.is_none() {
+                                                               log_claim!($tx_info, $holder_tx, htlc_output, false);
+                                                               continue 'outer_loop;
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+
+                       if input.previous_output.txid == self.current_holder_commitment_tx.txid {
+                               scan_commitment!(self.current_holder_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+                                       "our latest holder commitment tx", true);
+                       }
+                       if let Some(ref prev_holder_signed_commitment_tx) = self.prev_holder_signed_commitment_tx {
+                               if input.previous_output.txid == prev_holder_signed_commitment_tx.txid {
+                                       scan_commitment!(prev_holder_signed_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+                                               "our previous holder commitment tx", true);
+                               }
+                       }
+                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(&input.previous_output.txid) {
+                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, (b.as_ref().clone()).map(|boxed| &**boxed))),
+                                       "counterparty commitment tx", false);
+                       }
+
+                       // Check that scan_commitment, above, decided there is some source worth relaying an
+                       // HTLC resolution backwards to and figure out whether we learned a preimage from it.
+                       if let Some((source, payment_hash)) = payment_data {
+                               let mut payment_preimage = PaymentPreimage([0; 32]);
+                               if accepted_preimage_claim {
+                                       if !self.pending_monitor_events.iter().any(
+                                               |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { upd.source == source } else { false }) {
+                                               payment_preimage.0.copy_from_slice(&input.witness[3]);
+                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+                                                       source,
+                                                       payment_preimage: Some(payment_preimage),
+                                                       payment_hash
+                                               }));
+                                       }
+                               } else if offered_preimage_claim {
+                                       if !self.pending_monitor_events.iter().any(
+                                               |update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
+                                                       upd.source == source
+                                               } else { false }) {
+                                               payment_preimage.0.copy_from_slice(&input.witness[1]);
+                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+                                                       source,
+                                                       payment_preimage: Some(payment_preimage),
+                                                       payment_hash
+                                               }));
+                                       }
+                               } else {
+                                       log_info!(logger, "Failing HTLC with payment_hash {} timed out by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                                               hash_map::Entry::Occupied(mut entry) => {
+                                                       let e = entry.get_mut();
+                                                       e.retain(|ref event| {
+                                                               match **event {
+                                                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
+                                                                               return htlc_update.0 != source
+                                                                       },
+                                                                       _ => true
+                                                               }
+                                                       });
+                                                       e.push(OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)});
+                                               }
+                                               hash_map::Entry::Vacant(entry) => {
+                                                       entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)}]);
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+
+       /// Checks if any broadcast transaction pays funds back to an address we can assume we own
+       fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
+               let mut spendable_output = None;
+               for (i, outp) in tx.output.iter().enumerate() { // There is at most one spendable output for any channel tx, including ones generated by us
+                       if i > ::std::u16::MAX as usize {
+                               // While it is possible that an output exists on chain which is greater than the
+                               // 2^16th output in a given transaction, this is only possible if the output is not
+                               // in a lightning transaction and was instead placed there by some third party who
+                               // wishes to give us money for no reason.
+                               // Namely, any lightning transactions which we pre-sign will never have anywhere
+                               // near 2^16 outputs, both because such transactions must have ~2^16 outputs whose
+                               // scripts are no longer than one byte in length and because they are inherently
+                               // non-standard due to their size.
+                               // Thus, it is completely safe to ignore such outputs, and while it may result in
+                               // us ignoring non-lightning funds sent to us, that is only possible if someone fills
+                               // nearly a full block with garbage just to hit this case.
+                               continue;
+                       }
+                       if outp.script_pubkey == self.destination_script {
+                               spendable_output =  Some(SpendableOutputDescriptor::StaticOutput {
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
+                                       output: outp.clone(),
+                               });
+                               break;
+                       } else if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
+                               if broadcasted_holder_revokable_script.0 == outp.script_pubkey {
+                                       spendable_output =  Some(SpendableOutputDescriptor::DynamicOutputP2WSH {
+                                               outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
+                                               per_commitment_point: broadcasted_holder_revokable_script.1,
+                                               to_self_delay: self.on_holder_tx_csv,
+                                               output: outp.clone(),
+                                               key_derivation_params: self.keys.key_derivation_params(),
+                                               revocation_pubkey: broadcasted_holder_revokable_script.2.clone(),
+                                       });
+                                       break;
+                               }
+                       } else if self.counterparty_payment_script == outp.script_pubkey {
+                               spendable_output = Some(SpendableOutputDescriptor::StaticOutputCounterpartyPayment {
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
+                                       output: outp.clone(),
+                                       key_derivation_params: self.keys.key_derivation_params(),
+                               });
+                               break;
+                       } else if outp.script_pubkey == self.shutdown_script {
+                               spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
+                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
+                                       output: outp.clone(),
+                               });
+                       }
+               }
+               if let Some(spendable_output) = spendable_output {
+                       log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), height + ANTI_REORG_DELAY - 1);
+                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+                               hash_map::Entry::Occupied(mut entry) => {
+                                       let e = entry.get_mut();
+                                       e.push(OnchainEvent::MaturingOutput { descriptor: spendable_output });
+                               }
+                               hash_map::Entry::Vacant(entry) => {
+                                       entry.insert(vec![OnchainEvent::MaturingOutput { descriptor: spendable_output }]);
+                               }
+                       }
+               }
+       }
+}
+
+const MAX_ALLOC_SIZE: usize = 64*1024;
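+
+// A rough deserialization sketch (assuming `serialized` holds bytes previously written by the
+// monitor's serialization logic and `InMemoryChannelKeys` is the ChanSigner type in use):
+//
+//     let mut cursor = ::std::io::Cursor::new(&serialized[..]);
+//     let (last_block_hash, monitor): (BlockHash, ChannelMonitor<InMemoryChannelKeys>) =
+//             Readable::read(&mut cursor).expect("valid monitor bytes");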
+
+impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>) {
+       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+               macro_rules! unwrap_obj {
+                       ($key: expr) => {
+                               match $key {
+                                       Ok(res) => res,
+                                       Err(_) => return Err(DecodeError::InvalidValue),
+                               }
+                       }
+               }
+
+               let _ver: u8 = Readable::read(reader)?;
+               let min_ver: u8 = Readable::read(reader)?;
+               if min_ver > SERIALIZATION_VERSION {
+                       return Err(DecodeError::UnknownVersion);
+               }
+
+               let latest_update_id: u64 = Readable::read(reader)?;
+               let commitment_transaction_number_obscure_factor = <U48 as Readable>::read(reader)?.0;
+
+               let destination_script = Readable::read(reader)?;
+               let broadcasted_holder_revokable_script = match <u8 as Readable>::read(reader)? {
+                       0 => {
+                               let revokable_address = Readable::read(reader)?;
+                               let per_commitment_point = Readable::read(reader)?;
+                               let revokable_script = Readable::read(reader)?;
+                               Some((revokable_address, per_commitment_point, revokable_script))
+                       },
+                       1 => { None },
+                       _ => return Err(DecodeError::InvalidValue),
+               };
+               let counterparty_payment_script = Readable::read(reader)?;
+               let shutdown_script = Readable::read(reader)?;
+
+               let keys = Readable::read(reader)?;
+               // Technically this can fail and cause serialization to fail a round-trip, but only for
+               // barely-init'd ChannelMonitors that we can't do anything with.
+               let outpoint = OutPoint {
+                       txid: Readable::read(reader)?,
+                       index: Readable::read(reader)?,
+               };
+               let funding_info = (outpoint, Readable::read(reader)?);
+               let current_counterparty_commitment_txid = Readable::read(reader)?;
+               let prev_counterparty_commitment_txid = Readable::read(reader)?;
+
+               let counterparty_tx_cache = Readable::read(reader)?;
+               let funding_redeemscript = Readable::read(reader)?;
+               let channel_value_satoshis = Readable::read(reader)?;
+
+               let their_cur_revocation_points = {
+                       let first_idx = <U48 as Readable>::read(reader)?.0;
+                       if first_idx == 0 {
+                               None
+                       } else {
+                               let first_point = Readable::read(reader)?;
+                               let second_point_slice: [u8; 33] = Readable::read(reader)?;
+                               if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 {
+                                       Some((first_idx, first_point, None))
+                               } else {
+                                       Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice)))))
+                               }
+                       }
+               };
+
+               let on_holder_tx_csv: u16 = Readable::read(reader)?;
+
+               let commitment_secrets = Readable::read(reader)?;
+
+               macro_rules! read_htlc_in_commitment {
+                       () => {
+                               {
+                                       let offered: bool = Readable::read(reader)?;
+                                       let amount_msat: u64 = Readable::read(reader)?;
+                                       let cltv_expiry: u32 = Readable::read(reader)?;
+                                       let payment_hash: PaymentHash = Readable::read(reader)?;
+                                       let transaction_output_index: Option<u32> = Readable::read(reader)?;
+
+                                       HTLCOutputInCommitment {
+                                               offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index
+                                       }
+                               }
+                       }
+               }
+
+               let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
+               let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+               for _ in 0..counterparty_claimable_outpoints_len {
+                       let txid: Txid = Readable::read(reader)?;
+                       let htlcs_count: u64 = Readable::read(reader)?;
+                       let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
+                       for _ in 0..htlcs_count {
+                               htlcs.push((read_htlc_in_commitment!(), <Option<HTLCSource> as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o))));
+                       }
+                       if let Some(_) = counterparty_claimable_outpoints.insert(txid, htlcs) {
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+
+               let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
+               let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+               for _ in 0..counterparty_commitment_txn_on_chain_len {
+                       let txid: Txid = Readable::read(reader)?;
+                       let commitment_number = <U48 as Readable>::read(reader)?.0;
+                       let outputs_count = <u64 as Readable>::read(reader)?;
+                       let mut outputs = Vec::with_capacity(cmp::min(outputs_count as usize, MAX_ALLOC_SIZE / 8));
+                       for _ in 0..outputs_count {
+                               outputs.push(Readable::read(reader)?);
+                       }
+                       if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, (commitment_number, outputs)) {
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+
+               let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
+               let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+               for _ in 0..counterparty_hash_commitment_number_len {
+                       let payment_hash: PaymentHash = Readable::read(reader)?;
+                       let commitment_number = <U48 as Readable>::read(reader)?.0;
+                       if let Some(_) = counterparty_hash_commitment_number.insert(payment_hash, commitment_number) {
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+
+               macro_rules! read_holder_tx {
+                       () => {
+                               {
+                                       let txid = Readable::read(reader)?;
+                                       let revocation_key = Readable::read(reader)?;
+                                       let a_htlc_key = Readable::read(reader)?;
+                                       let b_htlc_key = Readable::read(reader)?;
+                                       let delayed_payment_key = Readable::read(reader)?;
+                                       let per_commitment_point = Readable::read(reader)?;
+                                       let feerate_per_kw: u32 = Readable::read(reader)?;
+
+                                       let htlcs_len: u64 = Readable::read(reader)?;
+                                       let mut htlcs = Vec::with_capacity(cmp::min(htlcs_len as usize, MAX_ALLOC_SIZE / 128));
+                                       for _ in 0..htlcs_len {
+                                               let htlc = read_htlc_in_commitment!();
+                                               let sigs = match <u8 as Readable>::read(reader)? {
+                                                       0 => None,
+                                                       1 => Some(Readable::read(reader)?),
+                                                       _ => return Err(DecodeError::InvalidValue),
+                                               };
+                                               htlcs.push((htlc, sigs, Readable::read(reader)?));
+                                       }
+
+                                       HolderSignedTx {
+                                               txid,
+                                               revocation_key, a_htlc_key, b_htlc_key, delayed_payment_key, per_commitment_point, feerate_per_kw,
+                                               htlc_outputs: htlcs
+                                       }
+                               }
+                       }
+               }
+
+               let prev_holder_signed_commitment_tx = match <u8 as Readable>::read(reader)? {
+                       0 => None,
+                       1 => {
+                               Some(read_holder_tx!())
+                       },
+                       _ => return Err(DecodeError::InvalidValue),
+               };
+               let current_holder_commitment_tx = read_holder_tx!();
+
+               let current_counterparty_commitment_number = <U48 as Readable>::read(reader)?.0;
+               let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
+
+               let payment_preimages_len: u64 = Readable::read(reader)?;
+               let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+               for _ in 0..payment_preimages_len {
+                       let preimage: PaymentPreimage = Readable::read(reader)?;
+                       let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
+                       if let Some(_) = payment_preimages.insert(hash, preimage) {
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+
+               let pending_monitor_events_len: u64 = Readable::read(reader)?;
+               let mut pending_monitor_events = Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
+               for _ in 0..pending_monitor_events_len {
+                       let ev = match <u8 as Readable>::read(reader)? {
+                               0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
+                               1 => MonitorEvent::CommitmentTxBroadcasted(funding_info.0),
+                               _ => return Err(DecodeError::InvalidValue)
+                       };
+                       pending_monitor_events.push(ev);
+               }
+
+               let pending_events_len: u64 = Readable::read(reader)?;
+               let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
+               for _ in 0..pending_events_len {
+                       if let Some(event) = MaybeReadable::read(reader)? {
+                               pending_events.push(event);
+                       }
+               }
+
+               let last_block_hash: BlockHash = Readable::read(reader)?;
+
+               let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
+               let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
+               for _ in 0..waiting_threshold_conf_len {
+                       let height_target = Readable::read(reader)?;
+                       let events_len: u64 = Readable::read(reader)?;
+                       let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
+                       for _ in 0..events_len {
+                               let ev = match <u8 as Readable>::read(reader)? {
+                                       0 => {
+                                               let htlc_source = Readable::read(reader)?;
+                                               let hash = Readable::read(reader)?;
+                                               OnchainEvent::HTLCUpdate {
+                                                       htlc_update: (htlc_source, hash)
+                                               }
+                                       },
+                                       1 => {
+                                               let descriptor = Readable::read(reader)?;
+                                               OnchainEvent::MaturingOutput {
+                                                       descriptor
+                                               }
+                                       },
+                                       _ => return Err(DecodeError::InvalidValue),
+                               };
+                               events.push(ev);
+                       }
+                       onchain_events_waiting_threshold_conf.insert(height_target, events);
+               }
+
+               let outputs_to_watch_len: u64 = Readable::read(reader)?;
+               let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<Vec<Script>>())));
+               for _ in 0..outputs_to_watch_len {
+                       let txid = Readable::read(reader)?;
+                       let outputs_len: u64 = Readable::read(reader)?;
+                       let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Script>()));
+                       for _ in 0..outputs_len {
+                               outputs.push(Readable::read(reader)?);
+                       }
+                       if let Some(_) = outputs_to_watch.insert(txid, outputs) {
+                               return Err(DecodeError::InvalidValue);
+                       }
+               }
+               let onchain_tx_handler = Readable::read(reader)?;
+
+               let lockdown_from_offchain = Readable::read(reader)?;
+               let holder_tx_signed = Readable::read(reader)?;
+
+               Ok((last_block_hash.clone(), ChannelMonitor {
+                       latest_update_id,
+                       commitment_transaction_number_obscure_factor,
+
+                       destination_script,
+                       broadcasted_holder_revokable_script,
+                       counterparty_payment_script,
+                       shutdown_script,
+
+                       keys,
+                       funding_info,
+                       current_counterparty_commitment_txid,
+                       prev_counterparty_commitment_txid,
+
+                       counterparty_tx_cache,
+                       funding_redeemscript,
+                       channel_value_satoshis,
+                       their_cur_revocation_points,
+
+                       on_holder_tx_csv,
+
+                       commitment_secrets,
+                       counterparty_claimable_outpoints,
+                       counterparty_commitment_txn_on_chain,
+                       counterparty_hash_commitment_number,
+
+                       prev_holder_signed_commitment_tx,
+                       current_holder_commitment_tx,
+                       current_counterparty_commitment_number,
+                       current_holder_commitment_number,
+
+                       payment_preimages,
+                       pending_monitor_events,
+                       pending_events,
+
+                       onchain_events_waiting_threshold_conf,
+                       outputs_to_watch,
+
+                       onchain_tx_handler,
+
+                       lockdown_from_offchain,
+                       holder_tx_signed,
+
+                       last_block_hash,
+                       secp_ctx: Secp256k1::new(),
+               }))
+       }
+}
+
+#[cfg(test)]
+mod tests {
+       use bitcoin::blockdata::script::{Script, Builder};
+       use bitcoin::blockdata::opcodes;
+       use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
+       use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+       use bitcoin::util::bip143;
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
+       use bitcoin::hashes::hex::FromHex;
+       use bitcoin::hash_types::Txid;
+       use hex;
+       use chain::channelmonitor::ChannelMonitor;
+       use chain::transaction::OutPoint;
+       use ln::channelmanager::{PaymentPreimage, PaymentHash};
+       use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
+       use ln::chan_utils;
+       use ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction};
+       use util::test_utils::TestLogger;
+       use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+       use bitcoin::secp256k1::Secp256k1;
+       use std::sync::Arc;
+       use chain::keysinterface::InMemoryChannelKeys;
+
+       #[test]
+       fn test_prune_preimages() {
+               let secp_ctx = Secp256k1::new();
+               let logger = Arc::new(TestLogger::new());
+
+               let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+               let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+
+               let mut preimages = Vec::new();
+               {
+                       for i in 0..20 {
+                               let preimage = PaymentPreimage([i; 32]);
+                               let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
+                               preimages.push((preimage, hash));
+                       }
+               }
+
+               macro_rules! preimages_slice_to_htlc_outputs {
+                       ($preimages_slice: expr) => {
+                               {
+                                       let mut res = Vec::new();
+                                       for (idx, preimage) in $preimages_slice.iter().enumerate() {
+                                               res.push((HTLCOutputInCommitment {
+                                                       offered: true,
+                                                       amount_msat: 0,
+                                                       cltv_expiry: 0,
+                                                       payment_hash: preimage.1.clone(),
+                                                       transaction_output_index: Some(idx as u32),
+                                               }, None));
+                                       }
+                                       res
+                               }
+                       }
+               }
+               macro_rules! preimages_to_holder_htlcs {
+                       ($preimages_slice: expr) => {
+                               {
+                                       let mut inp = preimages_slice_to_htlc_outputs!($preimages_slice);
+                                       let res: Vec<_> = inp.drain(..).map(|e| { (e.0, None, e.1) }).collect();
+                                       res
+                               }
+                       }
+               }
+
+               macro_rules! test_preimages_exist {
+                       ($preimages_slice: expr, $monitor: expr) => {
+                               for preimage in $preimages_slice {
+                                       assert!($monitor.payment_preimages.contains_key(&preimage.1));
+                               }
+                       }
+               }
+
+               let keys = InMemoryChannelKeys::new(
+                       &secp_ctx,
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       SecretKey::from_slice(&[41; 32]).unwrap(),
+                       [41; 32],
+                       0,
+                       (0, 0)
+               );
+
+               // Prune with one old state and a holder commitment tx whose HTLCs partially overlap
+               // with the old state.
+               let mut monitor = ChannelMonitor::new(keys,
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0, &Script::new(),
+                       (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()),
+                       10, Script::new(), 46, 0, HolderCommitmentTransaction::dummy());
+
+               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
+               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
+               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
+               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
+               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
+               for &(ref preimage, ref hash) in preimages.iter() {
+                       monitor.provide_payment_preimage(hash, preimage);
+               }
+
+               // Now provide a secret, pruning preimages 10-15
+               let mut secret = [0; 32];
+               secret[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+               monitor.provide_secret(281474976710655, secret.clone()).unwrap();
+               assert_eq!(monitor.payment_preimages.len(), 15);
+               test_preimages_exist!(&preimages[0..10], monitor);
+               test_preimages_exist!(&preimages[15..20], monitor);
+
+               // Now provide a further secret, pruning preimages 15-17
+               secret[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+               monitor.provide_secret(281474976710654, secret.clone()).unwrap();
+               assert_eq!(monitor.payment_preimages.len(), 13);
+               test_preimages_exist!(&preimages[0..10], monitor);
+               test_preimages_exist!(&preimages[17..20], monitor);
+
+               // Now update holder commitment tx info, pruning only element 18 as we still care about the
+               // previous commitment tx's preimages too
+               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();
+               secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+               monitor.provide_secret(281474976710653, secret.clone()).unwrap();
+               assert_eq!(monitor.payment_preimages.len(), 12);
+               test_preimages_exist!(&preimages[0..10], monitor);
+               test_preimages_exist!(&preimages[18..20], monitor);
+
+               // But if we do it again, we'll prune 5-10
+               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..3])).unwrap();
+               secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+               monitor.provide_secret(281474976710652, secret.clone()).unwrap();
+               assert_eq!(monitor.payment_preimages.len(), 5);
+               test_preimages_exist!(&preimages[0..5], monitor);
+       }
+
+       #[test]
+       fn test_claim_txn_weight_computation() {
+               // We test claim transaction weight against the expected weight rather than the
+               // actual weight, avoiding variance from signature sizes and time-lock delays.
+
+               let secp_ctx = Secp256k1::new();
+               let privkey = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+               let pubkey = PublicKey::from_secret_key(&secp_ctx, &privkey);
+               let mut sum_actual_sigs = 0;
+
+               macro_rules! sign_input {
+                       ($sighash_parts: expr, $idx: expr, $amount: expr, $input_type: expr, $sum_actual_sigs: expr) => {
+                               let htlc = HTLCOutputInCommitment {
+                                       offered: if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::OfferedHTLC { true } else { false },
+                                       amount_msat: 0,
+                                       cltv_expiry: 2 << 16,
+                                       payment_hash: PaymentHash([1; 32]),
+                                       transaction_output_index: Some($idx as u32),
+                               };
+                               let redeem_script = if *$input_type == InputDescriptors::RevokedOutput { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
+                               let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]);
+                               let sig = secp_ctx.sign(&sighash, &privkey);
+                               $sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec());
+                               $sighash_parts.access_witness($idx)[0].push(SigHashType::All as u8);
+                               sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len();
+                               if *$input_type == InputDescriptors::RevokedOutput {
+                                       $sighash_parts.access_witness($idx).push(vec!(1));
+                               } else if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::RevokedReceivedHTLC {
+                                       $sighash_parts.access_witness($idx).push(pubkey.clone().serialize().to_vec());
+                               } else if *$input_type == InputDescriptors::ReceivedHTLC {
+                                       $sighash_parts.access_witness($idx).push(vec![0]);
+                               } else {
+                                       $sighash_parts.access_witness($idx).push(PaymentPreimage([1; 32]).0.to_vec());
+                               }
+                               $sighash_parts.access_witness($idx).push(redeem_script.into_bytes());
+                               println!("witness[0] {}", $sighash_parts.access_witness($idx)[0].len());
+                               println!("witness[1] {}", $sighash_parts.access_witness($idx)[1].len());
+                               println!("witness[2] {}", $sighash_parts.access_witness($idx)[2].len());
+                       }
+               }
+
+               let script_pubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script();
+               let txid = Txid::from_hex("56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d").unwrap();
+
+               // Justice tx with 1 revoked to_holder output, 2 revoked offered HTLCs, 1 revoked received HTLC
+               let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+               for i in 0..4 {
+                       claim_tx.input.push(TxIn {
+                               previous_output: BitcoinOutPoint {
+                                       txid,
+                                       vout: i,
+                               },
+                               script_sig: Script::new(),
+                               sequence: 0xfffffffd,
+                               witness: Vec::new(),
+                       });
+               }
+               claim_tx.output.push(TxOut {
+                       script_pubkey: script_pubkey.clone(),
+                       value: 0,
+               });
+               let base_weight = claim_tx.get_weight();
+               let inputs_des = vec![InputDescriptors::RevokedOutput, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedReceivedHTLC];
+               {
+                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                       for (idx, inp) in inputs_des.iter().enumerate() {
+                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+                       }
+               }
+               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]),  claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+
+               // Claim tx with 1 offered HTLC, 3 received HTLCs
+               claim_tx.input.clear();
+               sum_actual_sigs = 0;
+               for i in 0..4 {
+                       claim_tx.input.push(TxIn {
+                               previous_output: BitcoinOutPoint {
+                                       txid,
+                                       vout: i,
+                               },
+                               script_sig: Script::new(),
+                               sequence: 0xfffffffd,
+                               witness: Vec::new(),
+                       });
+               }
+               let base_weight = claim_tx.get_weight();
+               let inputs_des = vec![InputDescriptors::OfferedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC];
+               {
+                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                       for (idx, inp) in inputs_des.iter().enumerate() {
+                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+                       }
+               }
+               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]),  claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+
+               // Justice tx with 1 revoked HTLC-Success tx output
+               claim_tx.input.clear();
+               sum_actual_sigs = 0;
+               claim_tx.input.push(TxIn {
+                       previous_output: BitcoinOutPoint {
+                               txid,
+                               vout: 0,
+                       },
+                       script_sig: Script::new(),
+                       sequence: 0xfffffffd,
+                       witness: Vec::new(),
+               });
+               let base_weight = claim_tx.get_weight();
+               let inputs_des = vec![InputDescriptors::RevokedOutput];
+               {
+                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
+                       for (idx, inp) in inputs_des.iter().enumerate() {
+                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
+                       }
+               }
+               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+       }
+
+       // Further testing is done in the ChannelManager integration tests.
+}
index 9968bc6c07284d13e556367a491571c1eec01a10..f7ddedef3a3e99b45f3fe8d35399fdce49b6c976 100644 (file)
@@ -9,6 +9,120 @@
 
 //! Structs and traits which allow other parts of rust-lightning to interact with the blockchain.
 
+use bitcoin::blockdata::script::Script;
+use bitcoin::blockdata::transaction::TxOut;
+use bitcoin::hash_types::{BlockHash, Txid};
+
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent};
+use chain::keysinterface::ChannelKeys;
+use chain::transaction::OutPoint;
+
 pub mod chaininterface;
+pub mod chainmonitor;
+pub mod channelmonitor;
 pub mod transaction;
 pub mod keysinterface;
+
+/// The `Access` trait defines behavior for accessing chain data and state, such as blocks and
+/// UTXOs.
+pub trait Access: Send + Sync {
+       /// Returns the transaction output of a funding transaction encoded by [`short_channel_id`].
+       /// Returns an error if `genesis_hash` is for a different chain or if such a transaction output
+       /// is unknown.
+       ///
+       /// [`short_channel_id`]: https://github.com/lightningnetwork/lightning-rfc/blob/master/07-routing-gossip.md#definition-of-short_channel_id
+       fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError>;
+}
+
+/// An error when accessing the chain via [`Access`].
+///
+/// [`Access`]: trait.Access.html
+#[derive(Clone)]
+pub enum AccessError {
+       /// The requested chain is unknown.
+       UnknownChain,
+
+       /// The requested transaction doesn't exist or hasn't confirmed.
+       UnknownTx,
+}
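
The `Access` contract above can be satisfied by something as simple as a lookup table. A minimal sketch, assuming a hypothetical `MapUtxoSource` type (not part of this change) and the trait exactly as declared here:

```rust
use std::collections::HashMap;

use bitcoin::blockdata::transaction::TxOut;
use bitcoin::hash_types::BlockHash;

use lightning::chain::{Access, AccessError};

/// Hypothetical UTXO source: a single known chain plus a map from
/// short_channel_id to the corresponding funding output.
struct MapUtxoSource {
	genesis_hash: BlockHash,
	utxos: HashMap<u64, TxOut>,
}

impl Access for MapUtxoSource {
	fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, AccessError> {
		// Queries for any chain other than the one we index are rejected.
		if *genesis_hash != self.genesis_hash {
			return Err(AccessError::UnknownChain);
		}
		// Unknown (or not-yet-confirmed) funding transactions map to UnknownTx.
		self.utxos.get(&short_channel_id).cloned().ok_or(AccessError::UnknownTx)
	}
}
```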
+
+/// The `Watch` trait defines behavior for watching on-chain activity pertaining to channels as
+/// blocks are connected and disconnected.
+///
+/// Each channel is associated with a [`ChannelMonitor`]. Implementations of this trait are
+/// responsible for maintaining a set of monitors such that they can be updated accordingly as
+/// channel state changes and HTLCs are resolved. See method documentation for specific
+/// requirements.
+///
+/// Implementations **must** ensure that updates are successfully applied and persisted upon method
+/// completion. If an update fails with a [`PermanentFailure`], then it must immediately shut down
+/// without taking any further action such as persisting the current state.
+///
+/// If an implementation maintains multiple instances of a channel's monitor (e.g., by storing
+/// backup copies), then it must ensure that updates are applied across all instances. Otherwise, it
+/// could result in a revoked transaction being broadcast, allowing the counterparty to claim all
+/// funds in the channel. See [`ChannelMonitorUpdateErr`] for more details about how to handle
+/// multiple instances.
+///
+/// [`ChannelMonitor`]: channelmonitor/struct.ChannelMonitor.html
+/// [`ChannelMonitorUpdateErr`]: channelmonitor/enum.ChannelMonitorUpdateErr.html
+/// [`PermanentFailure`]: channelmonitor/enum.ChannelMonitorUpdateErr.html#variant.PermanentFailure
+pub trait Watch: Send + Sync {
+       /// Keys needed by monitors for creating and signing transactions.
+       type Keys: ChannelKeys;
+
+       /// Watches a channel identified by `funding_txo` using `monitor`.
+       ///
+       /// Implementations are responsible for watching the chain for the funding transaction along
+       /// with any spends of outputs returned by [`get_outputs_to_watch`]. In practice, this means
+       /// calling [`block_connected`] and [`block_disconnected`] on the monitor.
+       ///
+       /// [`get_outputs_to_watch`]: channelmonitor/struct.ChannelMonitor.html#method.get_outputs_to_watch
+       /// [`block_connected`]: channelmonitor/struct.ChannelMonitor.html#method.block_connected
+       /// [`block_disconnected`]: channelmonitor/struct.ChannelMonitor.html#method.block_disconnected
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr>;
+
+       /// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
+       ///
+       /// Implementations must call [`update_monitor`] with the given update. See
+       /// [`ChannelMonitorUpdateErr`] for invariants around returning an error.
+       ///
+       /// [`update_monitor`]: channelmonitor/struct.ChannelMonitor.html#method.update_monitor
+       /// [`ChannelMonitorUpdateErr`]: channelmonitor/enum.ChannelMonitorUpdateErr.html
+       fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
+
+       /// Returns any monitor events since the last call. Subsequent calls must only return new
+       /// events.
+       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
+}
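
The two `ChannelMonitorUpdateErr` variants demand very different reactions from the caller. A minimal sketch of that decision, assuming only the `Watch` trait as declared above (the helper itself is hypothetical):

```rust
use lightning::chain::Watch;
use lightning::chain::channelmonitor::{ChannelMonitorUpdate, ChannelMonitorUpdateErr};
use lightning::chain::transaction::OutPoint;

/// Hypothetical helper: returns true if the update was applied and persisted.
fn try_update_channel<W: Watch>(watch: &W, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> bool {
	match watch.update_channel(funding_txo, update) {
		Ok(()) => true,
		Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
			// The update is expected to be applied later; the channel stays
			// paused until the caller is told the monitor has caught up.
			false
		},
		Err(ChannelMonitorUpdateErr::PermanentFailure) => {
			// The update could not be applied; the channel is expected to be
			// force-closed and its funds resolved on-chain.
			false
		},
	}
}
```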
+
+/// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
+/// channels.
+///
+/// This is useful so that a [`Watch`] implementation can convey to a chain source which
+/// transactions it should be notified of. Notification may take the form of pre-filtering blocks or, in
+/// the case of [BIP 157]/[BIP 158], only fetching a block if the compact filter matches. If
+/// receiving full blocks from a chain source, any further filtering is unnecessary.
+///
+/// After an output has been registered, subsequent block retrievals from the chain source must not
+/// exclude any transactions matching the new criteria nor any in-block descendants of such
+/// transactions.
+///
+/// Note that use as part of a [`Watch`] implementation involves reentrancy. Therefore, the `Filter`
+/// should not block on I/O. Implementations should instead queue the newly monitored data to be
+/// processed later. Then, in order to block until the data has been processed, any `Watch`
+/// invocation that has called the `Filter` must return [`TemporaryFailure`].
+///
+/// [`Watch`]: trait.Watch.html
+/// [`TemporaryFailure`]: channelmonitor/enum.ChannelMonitorUpdateErr.html#variant.TemporaryFailure
+/// [BIP 157]: https://github.com/bitcoin/bips/blob/master/bip-0157.mediawiki
+/// [BIP 158]: https://github.com/bitcoin/bips/blob/master/bip-0158.mediawiki
+pub trait Filter: Send + Sync {
+       /// Registers interest in a transaction with `txid` and having an output with `script_pubkey` as
+       /// a spending condition.
+       fn register_tx(&self, txid: &Txid, script_pubkey: &Script);
+
+       /// Registers interest in spends of a transaction output identified by `outpoint` having
+       /// `script_pubkey` as the spending condition.
+       fn register_output(&self, outpoint: &OutPoint, script_pubkey: &Script);
+}
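
A minimal sketch of a `Filter` implementation, assuming a hypothetical `WatchedData` type (not part of this change) that merely records registrations in memory for a block source to consult later, without blocking on I/O:

```rust
use std::sync::Mutex;

use bitcoin::blockdata::script::Script;
use bitcoin::hash_types::Txid;

use lightning::chain::Filter;
use lightning::chain::transaction::OutPoint;

/// Hypothetical Filter that accumulates registered transactions and outputs so
/// a chain source can match future blocks against them.
#[derive(Default)]
struct WatchedData {
	txids: Mutex<Vec<(Txid, Script)>>,
	outputs: Mutex<Vec<(OutPoint, Script)>>,
}

impl Filter for WatchedData {
	fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
		// Append-only and in-memory, so registration never blocks on I/O.
		self.txids.lock().unwrap().push((txid.clone(), script_pubkey.clone()));
	}

	fn register_output(&self, outpoint: &OutPoint, script_pubkey: &Script) {
		self.outputs.lock().unwrap().push((outpoint.clone(), script_pubkey.clone()));
	}
}
```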
index 946562bc1781f95f77f62ce4aa252509d77eec10..502eb895b2683e4ad3b7fa5bc34f367111781ede 100644 (file)
@@ -7,10 +7,41 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
-//! Contains simple structs describing parts of transactions on the chain.
+//! Types describing on-chain transactions.
 
 use bitcoin::hash_types::Txid;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::transaction::Transaction;
+
+/// Transaction data where each item consists of a transaction reference paired with the index of
+/// the transaction within a block.
+///
+/// Useful for passing enumerated transactions from a block, possibly filtered, in order to retain
+/// the transaction index.
+///
+/// ```
+/// extern crate bitcoin;
+/// extern crate lightning;
+///
+/// use bitcoin::blockdata::block::Block;
+/// use bitcoin::blockdata::constants::genesis_block;
+/// use bitcoin::network::constants::Network;
+/// use lightning::chain::transaction::TransactionData;
+///
+/// let block = genesis_block(Network::Bitcoin);
+/// let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+/// check_block(&block, &txdata);
+///
+/// fn check_block(block: &Block, txdata: &TransactionData) {
+///    assert_eq!(block.txdata.len(), 1);
+///    assert_eq!(txdata.len(), 1);
+///
+///    let (index, tx) = txdata[0];
+///    assert_eq!(index, 0);
+///    assert_eq!(tx, &block.txdata[0]);
+/// }
+/// ```
+pub type TransactionData<'a> = [(usize, &'a Transaction)];
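
Since the alias is just a slice of `(index, &Transaction)` pairs, a pre-filtered view can be built the same way. A small sketch (the helper names are hypothetical):

```rust
use bitcoin::blockdata::block::Block;
use bitcoin::blockdata::transaction::Transaction;
use lightning::chain::transaction::TransactionData;

/// Hypothetical helper: keep only transactions matching a predicate while
/// preserving each transaction's index within the block.
fn filtered_txdata<'a>(block: &'a Block, keep: impl Fn(&Transaction) -> bool) -> Vec<(usize, &'a Transaction)> {
	block.txdata.iter().enumerate().filter(|&(_, tx)| keep(tx)).collect()
}

/// The filtered result still borrows as a `&TransactionData`.
fn process(txdata: &TransactionData) {
	for (index, tx) in txdata {
		let _ = (index, tx.txid());
	}
}
```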
 
 /// A reference to a transaction output.
 ///
index 8133c1a59ca0aa924237f1b5c9961d8638a3ca1e..540155a3afbbe9bcbda72fd5d5208887354e3e0b 100644 (file)
@@ -81,7 +81,7 @@ pub fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32]
 /// Allows us to keep track of all of the revocation secrets of counterparties in just 50*32 bytes
 /// or so.
 #[derive(Clone)]
-pub(super) struct CounterpartyCommitmentSecrets {
+pub(crate) struct CounterpartyCommitmentSecrets {
        old_secrets: [([u8; 32], u64); 49],
 }
 
@@ -97,7 +97,7 @@ impl PartialEq for CounterpartyCommitmentSecrets {
 }
 
 impl CounterpartyCommitmentSecrets {
-       pub(super) fn new() -> Self {
+       pub(crate) fn new() -> Self {
                Self { old_secrets: [([0; 32], 1 << 48); 49], }
        }
 
@@ -111,7 +111,7 @@ impl CounterpartyCommitmentSecrets {
                48
        }
 
-       pub(super) fn get_min_seen_secret(&self) -> u64 {
+       pub(crate) fn get_min_seen_secret(&self) -> u64 {
                //TODO This can be optimized?
                let mut min = 1 << 48;
                for &(_, idx) in self.old_secrets.iter() {
@@ -123,7 +123,7 @@ impl CounterpartyCommitmentSecrets {
        }
 
        #[inline]
-       pub(super) fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
+       fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
                let mut res: [u8; 32] = secret;
                for i in 0..bits {
                        let bitpos = bits - 1 - i;
@@ -135,7 +135,7 @@ impl CounterpartyCommitmentSecrets {
                res
        }
 
-       pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), ()> {
+       pub(crate) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), ()> {
                let pos = Self::place_secret(idx);
                for i in 0..pos {
                        let (old_secret, old_idx) = self.old_secrets[i as usize];
@@ -151,7 +151,7 @@ impl CounterpartyCommitmentSecrets {
        }
 
        /// Can only fail if idx is < get_min_seen_secret
-       pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+       pub(crate) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
                for i in 0..self.old_secrets.len() {
                        if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
                                return Some(Self::derive_secret(self.old_secrets[i].0, i as u8, idx))
index bf2f8c69d31073965d7db722d66c0217b84e29f6..e6eb9e6a24aadbc5cb08cfc0423201d969333f52 100644 (file)
@@ -12,9 +12,9 @@
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
 
+use chain::channelmonitor::ChannelMonitorUpdateErr;
 use chain::transaction::OutPoint;
 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
-use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::features::InitFeatures;
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, RoutingMessageHandler};
@@ -41,7 +41,7 @@ fn test_simple_monitor_permanent_update_fail() {
 
        let (_, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
        unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable {..}, {});
@@ -76,7 +76,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        {
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -95,8 +95,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -125,7 +125,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Now set it to failed again...
        let (_, payment_hash_2) = get_payment_preimage_hash!(&nodes[0]);
        {
-               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+               *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
@@ -191,7 +191,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // Now try to send a second payment which will fail to send
        let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
        {
-               *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+               *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
                let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
                unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash_2, &None), false, APIError::MonitorUpdateFailed, {});
@@ -245,8 +245,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        }
 
        // Now fix monitor updating...
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -532,15 +532,15 @@ fn test_monitor_update_fail_cs() {
        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        let responses = nodes[1].node.get_and_clear_pending_msg_events();
@@ -563,7 +563,7 @@ fn test_monitor_update_fail_cs() {
                        assert!(updates.update_fee.is_none());
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
 
-                       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+                       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -573,8 +573,8 @@ fn test_monitor_update_fail_cs() {
                _ => panic!("Unexpected event"),
        }
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -622,7 +622,7 @@ fn test_monitor_update_fail_no_rebroadcast() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -630,8 +630,8 @@ fn test_monitor_update_fail_no_rebroadcast() {
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
@@ -684,7 +684,7 @@ fn test_monitor_update_raa_while_paused() {
        check_added_monitors!(nodes[1], 1);
        let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -696,8 +696,8 @@ fn test_monitor_update_raa_while_paused() {
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -779,7 +779,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        // Now fail monitor updating.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
@@ -797,7 +797,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                check_added_monitors!(nodes[0], 1);
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
        send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
@@ -858,8 +858,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 
        // Restore monitor updating, ensuring we immediately get a fail-back update and a
        // update_add update.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -1020,7 +1020,7 @@ fn test_monitor_update_fail_reestablish() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
 
@@ -1049,8 +1049,8 @@ fn test_monitor_update_fail_reestablish() {
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1123,7 +1123,7 @@ fn raa_no_response_awaiting_raa_state() {
        // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
        // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
        // then restore channel monitor updates.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1135,8 +1135,8 @@ fn raa_no_response_awaiting_raa_state() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented responses to RAA".to_string(), 1);
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        // nodes[1] should be AwaitingRAA here!
        check_added_monitors!(nodes[1], 0);
@@ -1228,7 +1228,7 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
        // update.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1257,8 +1257,8 @@ fn claim_while_disconnected_monitor_update_fail() {
 
        // Now un-fail the monitor, which will result in B sending its original commitment update,
        // receiving the commitment update from A, and the resulting commitment dances.
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1342,7 +1342,7 @@ fn monitor_failed_no_reestablish_response() {
                check_added_monitors!(nodes[0], 1);
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
        let payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -1366,8 +1366,8 @@ fn monitor_failed_no_reestablish_response() {
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1445,7 +1445,7 @@ fn first_message_on_recv_ordering() {
        let payment_event = SendEvent::from_event(events.pop().unwrap());
        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
 
        // Deliver the final RAA for the first payment, which does not require a response. RAAs
        // generally require a commitment_signed, so the fact that we're expecting an opposite response
@@ -1464,8 +1464,8 @@ fn first_message_on_recv_ordering() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1509,7 +1509,7 @@ fn test_monitor_update_fail_claim() {
 
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
        check_added_monitors!(nodes[1], 1);
 
@@ -1523,7 +1523,7 @@ fn test_monitor_update_fail_claim() {
 
        // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
        // paused, so forward shouldn't succeed until we call channel_monitor_updated().
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
 
        let mut events = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
@@ -1556,7 +1556,7 @@ fn test_monitor_update_fail_claim() {
        } else { panic!("Unexpected event!"); }
 
        // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1612,14 +1612,14 @@ fn test_monitor_update_on_pending_forwards() {
        nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
@@ -1675,15 +1675,15 @@ fn monitor_update_claim_fail_no_response() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        assert!(nodes[1].node.claim_funds(payment_preimage_1, &None, 1_000_000));
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1728,20 +1728,20 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
        check_added_monitors!(nodes[0], 0);
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);
 
@@ -1756,11 +1756,11 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        };
 
        if confirm_a_first {
-               confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+               confirm_transaction(&nodes[0], &funding_tx);
                nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id()));
        } else {
                assert!(!restore_b_before_conf);
-               confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+               confirm_transaction(&nodes[1], &funding_tx);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        }
 
@@ -1772,25 +1772,25 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        if !restore_b_before_conf {
-               confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+               confirm_transaction(&nodes[1], &funding_tx);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        }
 
-       *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       *nodes[1].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
 
        let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
                nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
 
-               confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+               confirm_transaction(&nodes[0], &funding_tx);
                let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
                (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
        } else {
                if restore_b_before_conf {
-                       confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+                       confirm_transaction(&nodes[1], &funding_tx);
                }
                let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
                (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
@@ -1843,8 +1843,8 @@ fn test_path_paused_mpp() {
 
        // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
        // (for the path 0 -> 2 -> 3) fails.
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       *nodes[0].chan_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
        // Now check that we get the right return value, indicating that the first path succeeded but
        // the second got a MonitorUpdateFailed err. This implies PaymentSendFailure::PartialFailure as
@@ -1855,7 +1855,7 @@ fn test_path_paused_mpp() {
                if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
        } else { panic!(); }
        check_added_monitors!(nodes[0], 2);
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+       *nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
 
        // Pass the first HTLC of the payment along to nodes[3].
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -1864,7 +1864,7 @@ fn test_path_paused_mpp() {
 
        // And check that, after we successfully update the monitor for chan_2 we can pass the second
        // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
-       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
+       let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
        nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 1);
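The hunks above all exercise monitor-update failures the same way: the test chain monitor is told to return a TemporaryFailure, the affected channel freezes, and the test later restores persistence and replays the last update id. A condensed sketch of that pattern, assuming the test-harness fields shown above (`chain_monitor.update_ret`, `chain_monitor.latest_monitor_update_id`) and the node's `channel_monitor_updated` call:

	// Condensed sketch of the failure/restore pattern used in the hunks above.
	// 1) Make the next monitor update report a temporary persistence failure.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
	// ... drive the channel forward; the affected channel is now frozen ...

	// 2) Restore persistence and tell the ChannelManager which update id made it to disk.
	*nodes[0].chain_monitor.update_ret.lock().unwrap() = Ok(());
	let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap()
		.get(&channel_id).unwrap().clone();
	nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);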
index dd9170b47e7d8541d88597a358eb7ffa138045a2..21917bb72967e0f17f1b4cd110a9a9cef558b734 100644 (file)
@@ -25,12 +25,12 @@ use bitcoin::secp256k1;
 use ln::features::{ChannelFeatures, InitFeatures};
 use ln::msgs;
 use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
 use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use ln::chan_utils::{CounterpartyCommitmentSecrets, HolderCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, PreCalculatedTxCreationKeys};
 use ln::chan_utils;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
-use chain::transaction::OutPoint;
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
+use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::{ChannelKeys, KeysInterface};
 use util::transaction_utils;
 use util::ser::{Readable, Writeable, Writer};
@@ -3315,7 +3315,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        ///
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
-       pub fn block_connected(&mut self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[usize]) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
+       pub fn block_connected(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
                let mut timed_out_htlcs = Vec::new();
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                        match htlc_update {
@@ -3335,7 +3335,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        }
                }
                if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
-                       for (ref tx, index_in_block) in txn_matched.iter().zip(indexes_of_txn_matched) {
+                       for &(index_in_block, tx) in txdata.iter() {
                                if tx.txid() == self.funding_txo.unwrap().txid {
                                        let txo_idx = self.funding_txo.unwrap().index as usize;
                                        if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
@@ -3366,14 +3366,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                                }
                                                        }
                                                }
-                                               if height > 0xff_ff_ff || (*index_in_block) > 0xff_ff_ff {
+                                               if height > 0xff_ff_ff || index_in_block > 0xff_ff_ff {
                                                        panic!("Block was bogus - either height 16 million or had > 16 million transactions");
                                                }
                                                assert!(txo_idx <= 0xffff); // txo_idx is a (u16 as usize), so this is just listed here for completeness
                                                self.funding_tx_confirmations = 1;
-                                               self.short_channel_id = Some(((height as u64)          << (5*8)) |
-                                                                            ((*index_in_block as u64) << (2*8)) |
-                                                                            ((txo_idx as u64)         << (0*8)));
+                                               self.short_channel_id = Some(((height as u64)         << (5*8)) |
+                                                                            ((index_in_block as u64) << (2*8)) |
+                                                                            ((txo_idx as u64)        << (0*8)));
                                        }
                                }
                        }
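The hunk above packs the funding transaction's confirmation height, its index within the block, and the funding output index into `short_channel_id` with byte shifts. A standalone sketch of the same packing for reference; `pack_short_channel_id` is a hypothetical helper name, not part of the patch:

	/// Hypothetical helper mirroring the shifts in Channel::block_connected above:
	/// 3 bytes of block height, 3 bytes of transaction index, 2 bytes of output index.
	fn pack_short_channel_id(height: u32, index_in_block: usize, txo_idx: u16) -> u64 {
		assert!(height <= 0xff_ff_ff && index_in_block <= 0xff_ff_ff);
		((height as u64) << (5*8)) | ((index_in_block as u64) << (2*8)) | ((txo_idx as u64) << (0*8))
	}

	#[test]
	fn short_channel_id_packing() {
		// Height 500_000, third transaction in the block, funding output at index 1.
		let scid = pack_short_channel_id(500_000, 2, 1);
		assert_eq!(scid >> 40, 500_000);
		assert_eq!((scid >> 16) & 0xff_ff_ff, 2);
		assert_eq!(scid & 0xff_ff, 1);
	}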
index 59d513b396825eb5e86f7fbf0514691829c4f818..353de9240cc3007a010b73d01f4853e9a2c6d909 100644 (file)
@@ -18,7 +18,6 @@
 //! imply it needs to fail HTLCs/payments/channels it manages).
 
 use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 
@@ -34,10 +33,12 @@ use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
-use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
-use chain::transaction::OutPoint;
+use chain;
+use chain::Watch;
+use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
+use chain::transaction::{OutPoint, TransactionData};
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -128,7 +129,7 @@ pub(super) enum HTLCForwardInfo {
 
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, PartialEq)]
-pub(super) struct HTLCPreviousHopData {
+pub(crate) struct HTLCPreviousHopData {
        short_channel_id: u64,
        htlc_id: u64,
        incoming_packet_shared_secret: [u8; 32],
@@ -147,7 +148,7 @@ struct ClaimableHTLC {
 
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, PartialEq)]
-pub(super) enum HTLCSource {
+pub(crate) enum HTLCSource {
        PreviousHopData(HTLCPreviousHopData),
        OutboundRoute {
                path: Vec<RouteHop>,
@@ -360,7 +361,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 ///
 /// Note that you can be a bit lazier about writing out ChannelManager than you can be with
 /// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from ManyChannelMonitor::add_/update_monitor, with ChannelManagers, writing updates
+/// returning from chain::Watch::watch_/update_channel; with ChannelManagers, writing updates
 /// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
 /// the serialization process). If the deserialized version is out-of-date compared to the
 /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
@@ -384,7 +385,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
 /// you're using lightning-net-tokio.
 pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -393,7 +394,7 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref,
        default_configuration: UserConfig,
        genesis_hash: BlockHash,
        fee_estimator: F,
-       monitor: M,
+       chain_monitor: M,
        tx_broadcaster: T,
 
        #[cfg(test)]
@@ -697,7 +698,7 @@ macro_rules! maybe_break_monitor_err {
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -717,18 +718,14 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ///
        /// Users need to notify the new ChannelManager when a new block is connected or
        /// disconnected using its `block_connected` and `block_disconnected` methods.
-       /// However, rather than calling these methods directly, the user should register
-       /// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's
-       /// `block_(dis)connected` methods, which will notify all registered listeners in one
-       /// go.
-       pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
+       pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
                let secp_ctx = Secp256k1::new();
 
                ChannelManager {
                        default_configuration: config.clone(),
                        genesis_hash: genesis_block(network).header.block_hash(),
                        fee_estimator: fee_est,
-                       monitor,
+                       chain_monitor,
                        tx_broadcaster,
 
                        latest_block_height: AtomicUsize::new(current_blockchain_height),
@@ -903,7 +900,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
                        // ignore the result here.
-                       let _ = self.monitor.update_monitor(funding_txo, monitor_update);
+                       let _ = self.chain_monitor.update_channel(funding_txo, monitor_update);
                }
        }
 
@@ -1283,7 +1280,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        }, onion_packet, &self.logger), channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
-                                               if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
                                                        // Note that MonitorUpdateFailed here indicates (per function docs)
                                                        // that we will resend the commitment update once monitor updating
@@ -1679,7 +1676,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                                        continue;
                                                                }
                                                        };
-                                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                                handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                continue;
                                                        }
@@ -2089,7 +2086,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok((msgs, monitor_option)) => {
                                        if let Some(monitor_update) = monitor_option {
-                                               if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        if was_frozen_for_monitor {
                                                                assert!(msgs.is_none());
                                                        } else {
@@ -2173,7 +2170,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// exists largely only to prevent races between this and concurrent update_monitor calls.
        ///
        /// Thus, the anticipated use is, at a high level:
-       ///  1) You register a ManyChannelMonitor with this ChannelManager,
+       ///  1) You register a chain::Watch with this ChannelManager,
        ///  2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
        ///     said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
        ///     any time it cannot do so instantly,
@@ -2314,7 +2311,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-               let ((funding_msg, monitor_update), mut chan) = {
+               let ((funding_msg, monitor), mut chan) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
@@ -2328,8 +2325,8 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
-               // lock before add_monitor
-               if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().0, monitor_update) {
+               // lock before watch_channel
+               if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
                        match e {
                                ChannelMonitorUpdateErr::PermanentFailure => {
                                        // Note that we reply with the new channel_id in error messages if we gave up on the
@@ -2377,7 +2374,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
-                                       if let Err(e) = self.monitor.add_monitor(chan.get().get_funding_txo().unwrap(), monitor) {
+                                       if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
                                                return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
                                        }
                                        (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
@@ -2647,13 +2644,13 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
                                                Err((Some(update), e)) => {
                                                        assert!(chan.get().is_awaiting_monitor_update());
-                                                       let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), update);
+                                                       let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
                                                        try_chan_entry!(self, Err(e), channel_state, chan);
                                                        unreachable!();
                                                },
                                                Ok(res) => res
                                        };
-                               if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
                                        //TODO: Rebroadcast closing_signed if present on monitor update restoration
                                }
@@ -2735,7 +2732,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
                                                break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
                                        htlcs_to_fail = htlcs_to_fail_in;
-                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
                                                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
                                                        break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
@@ -2860,7 +2857,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
                                        try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
                                if let Some(monitor_update) = monitor_update_opt {
-                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                // channel_reestablish doesn't guarantee the order it returns is sensical
                                                // for the messages it returns, but if we're setting what messages to
                                                // re-transmit on monitor update success, we need to make sure it is sane.
@@ -2947,7 +2944,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        if let Some((update_fee, commitment_signed, monitor_update)) =
                                                        break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
                                        {
-                                               if let Err(_e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        unimplemented!();
                                                }
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2973,11 +2970,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
        }
 
-       /// Process pending events from the ManyChannelMonitor.
+       /// Process pending events from the `chain::Watch`.
        fn process_pending_monitor_events(&self) {
                let mut failed_channels = Vec::new();
                {
-                       for monitor_event in self.monitor.get_and_clear_pending_monitor_events() {
+                       for monitor_event in self.chain_monitor.release_pending_monitor_events() {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
                                                if let Some(preimage) = htlc_update.payment_preimage {
@@ -3017,7 +3014,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3036,7 +3033,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 }
 
 impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3054,17 +3051,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
-       ChainListener for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
-                               L::Target: Logger,
+        L::Target: Logger,
 {
-       fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[usize]) {
+       /// Updates channel state based on transactions seen in a connected block.
+       pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
                let header_hash = header.block_hash();
-               log_trace!(self.logger, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
+               log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
                let _ = self.total_consistency_lock.read().unwrap();
                let mut failed_channels = Vec::new();
                let mut timed_out_htlcs = Vec::new();
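`ChannelManager::block_connected` is now a public inherent method taking a `TransactionData` slice rather than the parallel `txn_matched`/`indexes_of_txn_matched` arrays of the removed `ChainListener` impl. A hedged caller-side sketch, assuming `TransactionData` is a slice of `(index-in-block, &Transaction)` pairs (as the `for &(index_in_block, tx) in txdata.iter()` loops suggest) and using a hypothetical `MyChannelManager` alias for a fully-parameterized `ChannelManager`:

	use bitcoin::blockdata::block::{Block, BlockHeader};

	// `MyChannelManager` stands in for a concrete ChannelManager type alias (assumption).
	fn notify_block_connected(manager: &MyChannelManager, block: &Block, height: u32) {
		// Enumerate the block's transactions into (index_in_block, &Transaction) pairs.
		let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
		manager.block_connected(&block.header, &txdata, height);
	}

	fn notify_block_disconnected(manager: &MyChannelManager, header: &BlockHeader) {
		manager.block_disconnected(header);
	}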
@@ -3074,7 +3071,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                        let short_to_id = &mut channel_state.short_to_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
-                               let res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
+                               let res = channel.block_connected(header, txdata, height);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
@@ -3107,7 +3104,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                        return false;
                                }
                                if let Some(funding_txo) = channel.get_funding_txo() {
-                                       for tx in txn_matched {
+                                       for &(_, tx) in txdata.iter() {
                                                for inp in tx.input.iter() {
                                                        if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
                                                                log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
@@ -3173,8 +3170,11 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                }
        }
 
-       /// We force-close the channel without letting our counterparty participate in the shutdown
-       fn block_disconnected(&self, header: &BlockHeader, _: u32) {
+       /// Updates channel state based on a disconnected block.
+       ///
+       /// If necessary, the channel may be force-closed without letting the counterparty participate
+       /// in the shutdown.
+       pub fn block_disconnected(&self, header: &BlockHeader) {
                let _ = self.total_consistency_lock.read().unwrap();
                let mut failed_channels = Vec::new();
                {
@@ -3209,7 +3209,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 
 impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
        ChannelMessageHandler for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3652,7 +3652,7 @@ impl Readable for HTLCForwardInfo {
 }
 
 impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3732,11 +3732,10 @@ impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref
 /// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
 ///    ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo().
 /// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
+/// 5) Move the ChannelMonitors into your local chain::Watch.
 /// 6) Disconnect/connect blocks on the ChannelManager.
-/// 7) Register the new ChannelManager with your ChainWatchInterface.
 pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3750,12 +3749,12 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T:
        ///
        /// No calls to the FeeEstimator will be made during deserialization.
        pub fee_estimator: F,
-       /// The ManyChannelMonitor for use in the ChannelManager in the future.
+       /// The chain::Watch for use in the ChannelManager in the future.
        ///
-       /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
+       /// No calls to the chain::Watch will be made during deserialization. It is assumed that
        /// you have deserialized ChannelMonitors separately and will add them to your
-       /// ManyChannelMonitor after deserializing this ChannelManager.
-       pub monitor: M,
+       /// chain::Watch after deserializing this ChannelManager.
+       pub chain_monitor: M,
 
        /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
        /// used to broadcast the latest local commitment transactions of channels which must be
@@ -3785,7 +3784,7 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T:
 
 impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
                T::Target: BroadcasterInterface,
                K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
                F::Target: FeeEstimator,
@@ -3794,10 +3793,10 @@ impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L
        /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
        /// HashMap for you. This is primarily useful for C bindings where it is not practical to
        /// populate a HashMap directly from C.
-       pub fn new(keys_manager: K, fee_estimator: F, monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
+       pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
                        mut channel_monitors: Vec<&'a mut ChannelMonitor<ChanSigner>>) -> Self {
                Self {
-                       keys_manager, fee_estimator, monitor, tx_broadcaster, logger, default_config,
+                       keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
                        channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
                }
        }
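With the `monitor` argument renamed to `chain_monitor`, reconstructing a `ChannelManager` on startup still goes through `ChannelManagerReadArgs::new` plus the `ReadableArgs` impls in this file. A minimal sketch; the byte source (`persisted_bytes`), the separately deserialized `monitor_refs`, and the concrete generic parameters are assumptions for illustration:

	// Sketch only: assumes `keys_manager`, `fee_estimator`, `chain_monitor`, `tx_broadcaster`,
	// `logger`, `monitor_refs: Vec<&mut ChannelMonitor<ChanSigner>>` and `persisted_bytes` exist.
	let read_args = ChannelManagerReadArgs::new(
		keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger,
		UserConfig::default(), monitor_refs,
	);
	let mut reader = std::io::Cursor::new(&persisted_bytes[..]);
	let (last_block_hash, channel_manager): (BlockHash, ChannelManager<_, _, _, _, _, _>) =
		ReadableArgs::read(&mut reader, read_args).expect("valid ChannelManager bytes");
	// Reconnect/disconnect blocks as described in the deserialization notes above, then hand
	// the ChannelMonitors to your chain::Watch implementation.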
@@ -3807,7 +3806,7 @@ impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L
 // SimpleArcChannelManager type:
 impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<ChanSigner, M, T, K, F, L>>)
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3821,7 +3820,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 
 impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)
-       where M::Target: ManyChannelMonitor<Keys=ChanSigner>,
+       where M::Target: chain::Watch<Keys=ChanSigner>,
         T::Target: BroadcasterInterface,
         K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
         F::Target: FeeEstimator,
@@ -3933,7 +3932,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
-                       monitor: args.monitor,
+                       chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
 
                        latest_block_height: AtomicUsize::new(latest_block_height as usize),
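As the updated doc comments in this file stress, each `ChannelMonitor`/`ChannelMonitorUpdate` must be durably persisted before `chain::Watch::watch_channel`/`update_channel` return. A hedged sketch of that ordering, with the `Watch` method signature inferred from the call sites above and a hypothetical `store` persistence hook:

	use chain;
	use chain::channelmonitor::{ChannelMonitorUpdate, ChannelMonitorUpdateErr};
	use chain::keysinterface::ChannelKeys;
	use chain::transaction::OutPoint;

	// Persist the update before handing it to the in-memory watcher; if the write fails,
	// report TemporaryFailure so the channel stays frozen until channel_monitor_updated
	// is later called with this update's id.
	fn persist_then_watch<ChanSigner: ChannelKeys, W: chain::Watch<Keys = ChanSigner>>(
		inner: &W,
		funding_txo: OutPoint,
		update: ChannelMonitorUpdate,
		store: &dyn Fn(&OutPoint, &ChannelMonitorUpdate) -> std::io::Result<()>, // assumed storage hook
	) -> Result<(), ChannelMonitorUpdateErr> {
		if store(&funding_txo, &update).is_err() {
			return Err(ChannelMonitorUpdateErr::TemporaryFailure);
		}
		inner.update_channel(funding_txo, update)
	}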
diff --git a/lightning/src/ln/channelmonitor.rs b/lightning/src/ln/channelmonitor.rs
deleted file mode 100644 (file)
index 91f81fa..0000000
+++ /dev/null
@@ -1,2827 +0,0 @@
-// This file is Copyright its original authors, visible in version control
-// history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
-// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
-// You may not use this file except in accordance with one or both of these
-// licenses.
-
-//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
-//! here.
-//!
-//! ChannelMonitor objects are generated by ChannelManager in response to relevant
-//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
-//! be made in responding to certain messages, see ManyChannelMonitor for more.
-//!
-//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
-//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
-//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
-//! security-domain-separated system design, you should consider having multiple paths for
-//! ChannelMonitors to get out of the HSM and onto monitoring devices.
-
-use bitcoin::blockdata::block::BlockHeader;
-use bitcoin::blockdata::transaction::{TxOut,Transaction};
-use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-use bitcoin::blockdata::script::{Script, Builder};
-use bitcoin::blockdata::opcodes;
-use bitcoin::consensus::encode;
-
-use bitcoin::hashes::Hash;
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
-
-use bitcoin::secp256k1::{Secp256k1,Signature};
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
-use bitcoin::secp256k1;
-
-use ln::msgs::DecodeError;
-use ln::chan_utils;
-use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HolderCommitmentTransaction, HTLCType};
-use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
-use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
-use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator};
-use chain::transaction::OutPoint;
-use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
-use util::logger::Logger;
-use util::ser::{Readable, MaybeReadable, Writer, Writeable, U48};
-use util::{byte_utils, events};
-use util::events::Event;
-
-use std::collections::{HashMap, hash_map};
-use std::sync::Mutex;
-use std::{hash,cmp, mem};
-use std::ops::Deref;
-use std::io::Error;
-
-/// An update generated by the underlying Channel itself which contains some new information the
-/// ChannelMonitor should be made aware of.
-#[cfg_attr(test, derive(PartialEq))]
-#[derive(Clone)]
-#[must_use]
-pub struct ChannelMonitorUpdate {
-       pub(super) updates: Vec<ChannelMonitorUpdateStep>,
-       /// The sequence number of this update. Updates *must* be replayed in-order according to this
-       /// sequence number (and updates may panic if they are not). The update_id values are strictly
-       /// increasing and increase by one for each new update.
-       ///
-       /// This sequence number is also used to track up to which points updates which returned
-       /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
-       /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
-       pub update_id: u64,
-}
-
-impl Writeable for ChannelMonitorUpdate {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-               self.update_id.write(w)?;
-               (self.updates.len() as u64).write(w)?;
-               for update_step in self.updates.iter() {
-                       update_step.write(w)?;
-               }
-               Ok(())
-       }
-}
-impl Readable for ChannelMonitorUpdate {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-               let update_id: u64 = Readable::read(r)?;
-               let len: u64 = Readable::read(r)?;
-               let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<ChannelMonitorUpdateStep>()));
-               for _ in 0..len {
-                       updates.push(Readable::read(r)?);
-               }
-               Ok(Self { update_id, updates })
-       }
-}
-
-/// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone)]
-pub enum ChannelMonitorUpdateErr {
-       /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
-       /// our state failed, but is expected to succeed at some point in the future).
-       ///
-       /// Such a failure will "freeze" a channel, preventing us from revoking old states or
-       /// submitting new commitment transactions to the counterparty. Once the update(s) which failed
-       /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
-       /// restore the channel to an operational state.
-       ///
-       /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
-       /// you return a TemporaryFailure you must ensure that it is written to disk safely before
-       /// writing out the latest ChannelManager state.
-       ///
-       /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
-       /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
-       /// to claim it on this channel) and those updates must be applied wherever they can be. At
-       /// least one such updated ChannelMonitor must be persisted otherwise PermanentFailure should
-       /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
-       /// the channel which would invalidate previous ChannelMonitors are not made when a channel has
-       /// been "frozen".
-       ///
-       /// Note that even if updates made after TemporaryFailure succeed you must still call
-       /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
-       /// operation.
-       ///
-       /// Note that the update being processed here will not be replayed for you when you call
-       /// ChannelManager::channel_monitor_updated, so you must store the update itself along
-       /// with the persisted ChannelMonitor on your own local disk prior to returning a
-       /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
-       /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
-       /// reload-time.
-       ///
-       /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
-       /// remote location (with local copies persisted immediately), it is anticipated that all
-       /// updates will return TemporaryFailure until the remote copies could be updated.
-       TemporaryFailure,
-       /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
-       /// different watchtower and cannot update with all watchtowers that were previously informed
-       /// of this channel).
-       ///
-       /// On receiving this error, ChannelManager will force-close the channel and return at
-       /// least a final ChannelMonitorUpdate::ChannelForceClosed which must be delivered to at
-       /// least one ChannelMonitor copy. The revocation secret MUST NOT be released and off-chain
-       /// channel updates must be rejected.
-       ///
-       /// This failure may also signal a failure to update the local persisted copy of one of
-       /// the channel monitor instance.
-       ///
-       /// Note that even when you fail a holder commitment transaction update, you must store the
-       /// update to ensure you can claim from it in case a duplicate copy of this ChannelMonitor
-       /// broadcasts it (e.g. a distributed channel-monitor deployment).
-       PermanentFailure,
-}
-
-/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
-/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
-/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
-/// corrupted.
-/// Contains a human-readable error message.
-#[derive(Debug)]
-pub struct MonitorUpdateError(pub &'static str);
-
-/// An event to be processed by the ChannelManager.
-#[derive(PartialEq)]
-pub enum MonitorEvent {
-       /// A monitor event containing an HTLCUpdate.
-       HTLCEvent(HTLCUpdate),
-
-       /// A monitor event that the Channel's commitment transaction was broadcasted.
-       CommitmentTxBroadcasted(OutPoint),
-}
-
-/// Simple structure sent back by ManyChannelMonitor when an HTLC is detected on-chain on a
-/// forward channel, carrying the info needed to update the HTLC on the corresponding backward channel.
-#[derive(Clone, PartialEq)]
-pub struct HTLCUpdate {
-       pub(super) payment_hash: PaymentHash,
-       pub(super) payment_preimage: Option<PaymentPreimage>,
-       pub(super) source: HTLCSource
-}
-impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
-
-/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
-/// watchtower or watch our own channels.
-///
-/// Note that you must provide your own key by which to refer to channels.
-///
-/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
-/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
-/// index by a PublicKey which is required to sign any updates.
-///
-/// If you're using this for local monitoring of your own channels, you probably want to use
-/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
-///
-/// (C-not exported) due to an unconstrained generic in `Key`
-pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref>
-       where T::Target: BroadcasterInterface,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
-        C::Target: ChainWatchInterface,
-{
-       /// The monitors
-       pub monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
-       chain_monitor: C,
-       broadcaster: T,
-       logger: L,
-       fee_estimator: F
-}
-
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send>
-       ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
-       where T::Target: BroadcasterInterface,
-             F::Target: FeeEstimator,
-             L::Target: Logger,
-        C::Target: ChainWatchInterface,
-{
-       fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[usize]) {
-               let block_hash = header.block_hash();
-               {
-                       let mut monitors = self.monitors.lock().unwrap();
-                       for monitor in monitors.values_mut() {
-                               let txn_outputs = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
-
-                               for (ref txid, ref outputs) in txn_outputs {
-                                       for (idx, output) in outputs.iter().enumerate() {
-                                               self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
-                                       }
-                               }
-                       }
-               }
-       }
-
-       fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
-               let block_hash = header.block_hash();
-               let mut monitors = self.monitors.lock().unwrap();
-               for monitor in monitors.values_mut() {
-                       monitor.block_disconnected(disconnected_height, &block_hash, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
-               }
-       }
-}
-
-impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
-       where T::Target: BroadcasterInterface,
-             F::Target: FeeEstimator,
-             L::Target: Logger,
-        C::Target: ChainWatchInterface,
-{
-       /// Creates a new object which can be used to monitor several channels given the chain
-       /// interface with which to register to receive notifications.
-       pub fn new(chain_monitor: C, broadcaster: T, logger: L, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C> {
-               let res = SimpleManyChannelMonitor {
-                       monitors: Mutex::new(HashMap::new()),
-                       chain_monitor,
-                       broadcaster,
-                       logger,
-                       fee_estimator: feeest,
-               };
-
-               res
-       }
-
-       /// Adds or updates the monitor which monitors the channel referred to by the given key.
-       pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
-               let mut monitors = self.monitors.lock().unwrap();
-               let entry = match monitors.entry(key) {
-                       hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")),
-                       hash_map::Entry::Vacant(e) => e,
-               };
-               {
-                       let funding_txo = monitor.get_funding_txo();
-                       log_trace!(self.logger, "Got new Channel Monitor for channel {}", log_bytes!(funding_txo.0.to_channel_id()[..]));
-                       self.chain_monitor.install_watch_tx(&funding_txo.0.txid, &funding_txo.1);
-                       self.chain_monitor.install_watch_outpoint((funding_txo.0.txid, funding_txo.0.index as u32), &funding_txo.1);
-                       for (txid, outputs) in monitor.get_outputs_to_watch().iter() {
-                               for (idx, script) in outputs.iter().enumerate() {
-                                       self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script);
-                               }
-                       }
-               }
-               entry.insert(monitor);
-               Ok(())
-       }
-
-       /// Updates the monitor which monitors the channel referred to by the given key.
-       pub fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
-               let mut monitors = self.monitors.lock().unwrap();
-               match monitors.get_mut(&key) {
-                       Some(orig_monitor) => {
-                               log_trace!(self.logger, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor));
-                               orig_monitor.update_monitor(update, &self.broadcaster, &self.logger)
-                       },
-                       None => Err(MonitorUpdateError("No such monitor registered"))
-               }
-       }
-}
-
-impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send, C: Deref + Sync + Send> ManyChannelMonitor for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F, L, C>
-       where T::Target: BroadcasterInterface,
-             F::Target: FeeEstimator,
-             L::Target: Logger,
-        C::Target: ChainWatchInterface,
-{
-       type Keys = ChanSigner;
-
-       fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
-               match self.add_monitor_by_key(funding_txo, monitor) {
-                       Ok(_) => Ok(()),
-                       Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
-               }
-       }
-
-       fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
-               match self.update_monitor_by_key(funding_txo, update) {
-                       Ok(_) => Ok(()),
-                       Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
-               }
-       }
-
-       fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
-               let mut pending_monitor_events = Vec::new();
-               for chan in self.monitors.lock().unwrap().values_mut() {
-                       pending_monitor_events.append(&mut chan.get_and_clear_pending_monitor_events());
-               }
-               pending_monitor_events
-       }
-}
-
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref, L: Deref, C: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F, L, C>
-       where T::Target: BroadcasterInterface,
-             F::Target: FeeEstimator,
-             L::Target: Logger,
-        C::Target: ChainWatchInterface,
-{
-       fn get_and_clear_pending_events(&self) -> Vec<Event> {
-               let mut pending_events = Vec::new();
-               for chan in self.monitors.lock().unwrap().values_mut() {
-                       pending_events.append(&mut chan.get_and_clear_pending_events());
-               }
-               pending_events
-       }
-}
-
-/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
-/// instead claiming it in its own individual transaction.
-pub(crate) const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
-/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
-/// HTLC-Success transaction.
-/// In other words, this is an upper bound on how many blocks we think it can take us to get a
-/// transaction confirmed (and we use it in a few more, equivalent, places).
-pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
-/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
-/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
-/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
-/// at CLTV expiration height, but giving our peer a grace period may be profitable for us if they
-/// can still provide a late preimage. Nevertheless, the grace period has to be accounted for in our
-/// CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel failures
-/// due to expiration, but increase the cost of funds being locked up longer in case of failure.
-/// This delay also covers a low-power peer being slow to process blocks and thus being behind us on
-/// the accurate block height.
-/// When an on-chain failure must be passed backward, we may see the last block of ANTI_REORG_DELAY
-/// at worst this many blocks late, so this value is not only a mercy to our peer but also a
-/// safeguard giving ourselves enough time.
-pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
-/// Number of blocks we wait after seeing an HTLC output being solved before we fail corresponding inbound
-/// HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us losing money.
-/// We also use this delay to be sure we can remove our in-flight claim txn from the bump candidates buffer.
-/// It may cause spurious generation of bumped claim txn, but that's all right given the outpoint is already
-/// solved by a previous claim tx. What we want to avoid is a reorg evicting our claim tx and us then
-/// failing to keep bumping another claim tx to solve the outpoint.
-pub(crate) const ANTI_REORG_DELAY: u32 = 6;
-/// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
-/// refuse to accept a new HTLC.
-///
-/// This is used for a few separate purposes:
-/// 1) if we've received an MPP HTLC to us and it expires within this many blocks and we are
-///    waiting on additional parts (or waiting on the preimage for any HTLC from the user), we will
-///    fail this HTLC,
-/// 2) if we receive an HTLC within this many blocks of its expiry (plus one to avoid a race
-///    condition with the above), we will fail this HTLC without telling the user we received it,
-/// 3) if we are waiting on a connection or a channel state update to send an HTLC to a peer, and
-///    that HTLC expires within this many blocks, we will simply fail the HTLC instead.
-///
-/// (1) is all about protecting us - we need enough time to update the channel state before we hit
-/// CLTV_CLAIM_BUFFER, at which point we'd go on chain to claim the HTLC with the preimage.
-///
-/// (2) is the same, but with an additional buffer to avoid accepting an HTLC which is immediately
-/// in a race condition between the user connecting a block (which would fail it) and the user
-/// providing us the preimage (which would claim it).
-///
-/// (3) is about our counterparty - we don't want to relay an HTLC to a counterparty when they may
-/// end up force-closing the channel on us to claim it.
-pub(crate) const HTLC_FAIL_BACK_BUFFER: u32 = CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS;
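// Editor's sketch, not part of the original file: how the constants above compose. With
// CLTV_CLAIM_BUFFER = 6 and LATENCY_GRACE_PERIOD_BLOCKS = 3, HTLC_FAIL_BACK_BUFFER = 9, so an
// HTLC whose CLTV expiry is within 9 blocks of the current tip should be failed rather than held
// or relayed. `should_fail_unrelayed_htlc` is a hypothetical helper name; the real checks live in
// ChannelManager/Channel, but the arithmetic is the same.
fn should_fail_unrelayed_htlc(htlc_cltv_expiry: u32, best_block_height: u32) -> bool {
	htlc_cltv_expiry <= best_block_height + HTLC_FAIL_BACK_BUFFER
}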
-
-#[derive(Clone, PartialEq)]
-struct HolderSignedTx {
-       /// txid of the holder commitment transaction, just used to make comparisons faster
-       txid: Txid,
-       revocation_key: PublicKey,
-       a_htlc_key: PublicKey,
-       b_htlc_key: PublicKey,
-       delayed_payment_key: PublicKey,
-       per_commitment_point: PublicKey,
-       feerate_per_kw: u32,
-       htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
-}
-
-/// We use this to track counterparty commitment transactions and HTLC outputs and
-/// to generate any justice or 2nd-stage preimage/timeout transactions.
-#[derive(PartialEq)]
-struct CounterpartyCommitmentTransaction {
-       counterparty_delayed_payment_base_key: PublicKey,
-       counterparty_htlc_base_key: PublicKey,
-       on_counterparty_tx_csv: u16,
-       per_htlc: HashMap<Txid, Vec<HTLCOutputInCommitment>>
-}
-
-impl Writeable for CounterpartyCommitmentTransaction {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-               self.counterparty_delayed_payment_base_key.write(w)?;
-               self.counterparty_htlc_base_key.write(w)?;
-               w.write_all(&byte_utils::be16_to_array(self.on_counterparty_tx_csv))?;
-               w.write_all(&byte_utils::be64_to_array(self.per_htlc.len() as u64))?;
-               for (ref txid, ref htlcs) in self.per_htlc.iter() {
-                       w.write_all(&txid[..])?;
-                       w.write_all(&byte_utils::be64_to_array(htlcs.len() as u64))?;
-                       for &ref htlc in htlcs.iter() {
-                               htlc.write(w)?;
-                       }
-               }
-               Ok(())
-       }
-}
-impl Readable for CounterpartyCommitmentTransaction {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-               let counterparty_commitment_transaction = {
-                       let counterparty_delayed_payment_base_key = Readable::read(r)?;
-                       let counterparty_htlc_base_key = Readable::read(r)?;
-                       let on_counterparty_tx_csv: u16 = Readable::read(r)?;
-                       let per_htlc_len: u64 = Readable::read(r)?;
-                       let mut per_htlc = HashMap::with_capacity(cmp::min(per_htlc_len as usize, MAX_ALLOC_SIZE / 64));
-                       for _  in 0..per_htlc_len {
-                               let txid: Txid = Readable::read(r)?;
-                               let htlcs_count: u64 = Readable::read(r)?;
-                               let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
-                               for _ in 0..htlcs_count {
-                                       let htlc = Readable::read(r)?;
-                                       htlcs.push(htlc);
-                               }
-                               if let Some(_) = per_htlc.insert(txid, htlcs) {
-                                       return Err(DecodeError::InvalidValue);
-                               }
-                       }
-                       CounterpartyCommitmentTransaction {
-                               counterparty_delayed_payment_base_key,
-                               counterparty_htlc_base_key,
-                               on_counterparty_tx_csv,
-                               per_htlc,
-                       }
-               };
-               Ok(counterparty_commitment_transaction)
-       }
-}
-
-/// When ChannelMonitor discovers an onchain outpoint that is part of a channel and that it needs
-/// to generate a tx to push the channel state forward, we cache the outpoint-solving tx material
-/// so we can build a new bumped one in case of a lengthy confirmation delay.
-#[derive(Clone, PartialEq)]
-pub(crate) enum InputMaterial {
-       Revoked {
-               per_commitment_point: PublicKey,
-               counterparty_delayed_payment_base_key: PublicKey,
-               counterparty_htlc_base_key: PublicKey,
-               per_commitment_key: SecretKey,
-               input_descriptor: InputDescriptors,
-               amount: u64,
-               htlc: Option<HTLCOutputInCommitment>,
-               on_counterparty_tx_csv: u16,
-       },
-       CounterpartyHTLC {
-               per_commitment_point: PublicKey,
-               counterparty_delayed_payment_base_key: PublicKey,
-               counterparty_htlc_base_key: PublicKey,
-               preimage: Option<PaymentPreimage>,
-               htlc: HTLCOutputInCommitment
-       },
-       HolderHTLC {
-               preimage: Option<PaymentPreimage>,
-               amount: u64,
-       },
-       Funding {
-               funding_redeemscript: Script,
-       }
-}
-
-impl Writeable for InputMaterial  {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &InputMaterial::Revoked { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref per_commitment_key, ref input_descriptor, ref amount, ref htlc, ref on_counterparty_tx_csv} => {
-                               writer.write_all(&[0; 1])?;
-                               per_commitment_point.write(writer)?;
-                               counterparty_delayed_payment_base_key.write(writer)?;
-                               counterparty_htlc_base_key.write(writer)?;
-                               writer.write_all(&per_commitment_key[..])?;
-                               input_descriptor.write(writer)?;
-                               writer.write_all(&byte_utils::be64_to_array(*amount))?;
-                               htlc.write(writer)?;
-                               on_counterparty_tx_csv.write(writer)?;
-                       },
-                       &InputMaterial::CounterpartyHTLC { ref per_commitment_point, ref counterparty_delayed_payment_base_key, ref counterparty_htlc_base_key, ref preimage, ref htlc} => {
-                               writer.write_all(&[1; 1])?;
-                               per_commitment_point.write(writer)?;
-                               counterparty_delayed_payment_base_key.write(writer)?;
-                               counterparty_htlc_base_key.write(writer)?;
-                               preimage.write(writer)?;
-                               htlc.write(writer)?;
-                       },
-                       &InputMaterial::HolderHTLC { ref preimage, ref amount } => {
-                               writer.write_all(&[2; 1])?;
-                               preimage.write(writer)?;
-                               writer.write_all(&byte_utils::be64_to_array(*amount))?;
-                       },
-                       &InputMaterial::Funding { ref funding_redeemscript } => {
-                               writer.write_all(&[3; 1])?;
-                               funding_redeemscript.write(writer)?;
-                       }
-               }
-               Ok(())
-       }
-}
-
-impl Readable for InputMaterial {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
-               let input_material = match <u8 as Readable>::read(reader)? {
-                       0 => {
-                               let per_commitment_point = Readable::read(reader)?;
-                               let counterparty_delayed_payment_base_key = Readable::read(reader)?;
-                               let counterparty_htlc_base_key = Readable::read(reader)?;
-                               let per_commitment_key = Readable::read(reader)?;
-                               let input_descriptor = Readable::read(reader)?;
-                               let amount = Readable::read(reader)?;
-                               let htlc = Readable::read(reader)?;
-                               let on_counterparty_tx_csv = Readable::read(reader)?;
-                               InputMaterial::Revoked {
-                                       per_commitment_point,
-                                       counterparty_delayed_payment_base_key,
-                                       counterparty_htlc_base_key,
-                                       per_commitment_key,
-                                       input_descriptor,
-                                       amount,
-                                       htlc,
-                                       on_counterparty_tx_csv
-                               }
-                       },
-                       1 => {
-                               let per_commitment_point = Readable::read(reader)?;
-                               let counterparty_delayed_payment_base_key = Readable::read(reader)?;
-                               let counterparty_htlc_base_key = Readable::read(reader)?;
-                               let preimage = Readable::read(reader)?;
-                               let htlc = Readable::read(reader)?;
-                               InputMaterial::CounterpartyHTLC {
-                                       per_commitment_point,
-                                       counterparty_delayed_payment_base_key,
-                                       counterparty_htlc_base_key,
-                                       preimage,
-                                       htlc
-                               }
-                       },
-                       2 => {
-                               let preimage = Readable::read(reader)?;
-                               let amount = Readable::read(reader)?;
-                               InputMaterial::HolderHTLC {
-                                       preimage,
-                                       amount,
-                               }
-                       },
-                       3 => {
-                               InputMaterial::Funding {
-                                       funding_redeemscript: Readable::read(reader)?,
-                               }
-                       }
-                       _ => return Err(DecodeError::InvalidValue),
-               };
-               Ok(input_material)
-       }
-}
-
-/// ClaimRequest is a descriptor structure to communicate between the detection
-/// and reaction modules. ClaimRequests are generated by ChannelMonitor while parsing
-/// onchain txn leaked from a channel and are handed over to OnchainTxHandler, which
-/// is responsible for opportunistic aggregation, selecting and enforcing
-/// bumping logic, and building and signing transactions.
-pub(crate) struct ClaimRequest {
-       // Block height before which claiming is exclusive to one party;
-       // after reaching it, claiming may be contentious.
-       pub(crate) absolute_timelock: u32,
-       // Timeout txn must have nLocktime set, which means aggregating multiple
-       // ones must take the highest nLocktime among them to satisfy all of them.
-       // Sadly this has a few pitfalls: a) it takes longer to get funds back, b) the CLTV_DELTA
-       // of a sooner-expiring HTLC could be swallowed by the highest nLocktime of the HTLC set.
-       // To simplify, we mark them as non-aggregable.
-       pub(crate) aggregable: bool,
-       // Basic bitcoin outpoint (txid, vout)
-       pub(crate) outpoint: BitcoinOutPoint,
-       // Depending on the outpoint type, the set of data needed to generate the transaction
-       // digest and satisfy the witness program.
-       pub(crate) witness_data: InputMaterial
-}
-
-/// Upon ChannelMonitor discovering some classes of onchain tx, we may have to take action on them
-/// once they mature to enough confirmations (ANTI_REORG_DELAY).
-#[derive(Clone, PartialEq)]
-enum OnchainEvent {
-       /// An HTLC output getting solved by a timeout; at maturation we pass the upstream payment source information
-       /// back to resolve the inbound HTLC in the backward channel. Note that in the preimage case we pass the info
-       /// upstream without delay, as we can only win from it, so it's never an OnchainEvent.
-       HTLCUpdate {
-               htlc_update: (HTLCSource, PaymentHash),
-       },
-       MaturingOutput {
-               descriptor: SpendableOutputDescriptor,
-       },
-}
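// Editor's sketch, not part of the original file: how events of the kind above are typically
// scheduled. When the relevant transaction confirms at `conf_height`, the event is keyed by the
// height at which it reaches ANTI_REORG_DELAY confirmations and is acted on when a block at that
// height connects. The free-standing helper is an illustration of the pattern used by the
// `onchain_events_waiting_threshold_conf` field further below.
fn schedule_onchain_event(events: &mut HashMap<u32, Vec<OnchainEvent>>, conf_height: u32, ev: OnchainEvent) {
	let maturity_height = conf_height + ANTI_REORG_DELAY - 1;
	events.entry(maturity_height).or_insert_with(Vec::new).push(ev);
}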
-
-const SERIALIZATION_VERSION: u8 = 1;
-const MIN_SERIALIZATION_VERSION: u8 = 1;
-
-#[cfg_attr(test, derive(PartialEq))]
-#[derive(Clone)]
-pub(super) enum ChannelMonitorUpdateStep {
-       LatestHolderCommitmentTXInfo {
-               commitment_tx: HolderCommitmentTransaction,
-               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
-       },
-       LatestCounterpartyCommitmentTXInfo {
-               unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here
-               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
-               commitment_number: u64,
-               their_revocation_point: PublicKey,
-       },
-       PaymentPreimage {
-               payment_preimage: PaymentPreimage,
-       },
-       CommitmentSecret {
-               idx: u64,
-               secret: [u8; 32],
-       },
-       /// Used to indicate that no future updates will occur, and likely that the latest holder
-       /// commitment transaction(s) should be broadcast, as the channel has been force-closed.
-       ChannelForceClosed {
-               /// If set to false, we shouldn't broadcast the latest holder commitment transaction as we
-               /// think we've fallen behind!
-               should_broadcast: bool,
-       },
-}
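// Editor's sketch, not part of the original file: steps like the above are delivered inside a
// ChannelMonitorUpdate whose update_id must increase by exactly one per update (update_monitor()
// further below panics otherwise). Building a force-close update might look roughly like this;
// the helper name and the assumption about ChannelMonitorUpdate's field names are editorial.
fn force_close_update_sketch(latest_update_id: u64) -> ChannelMonitorUpdate {
	ChannelMonitorUpdate {
		update_id: latest_update_id + 1,
		updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
	}
}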
-
-impl Writeable for ChannelMonitorUpdateStep {
-       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
-               match self {
-                       &ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { ref commitment_tx, ref htlc_outputs } => {
-                               0u8.write(w)?;
-                               commitment_tx.write(w)?;
-                               (htlc_outputs.len() as u64).write(w)?;
-                               for &(ref output, ref signature, ref source) in htlc_outputs.iter() {
-                                       output.write(w)?;
-                                       signature.write(w)?;
-                                       source.write(w)?;
-                               }
-                       }
-                       &ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => {
-                               1u8.write(w)?;
-                               unsigned_commitment_tx.write(w)?;
-                               commitment_number.write(w)?;
-                               their_revocation_point.write(w)?;
-                               (htlc_outputs.len() as u64).write(w)?;
-                               for &(ref output, ref source) in htlc_outputs.iter() {
-                                       output.write(w)?;
-                                       source.as_ref().map(|b| b.as_ref()).write(w)?;
-                               }
-                       },
-                       &ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => {
-                               2u8.write(w)?;
-                               payment_preimage.write(w)?;
-                       },
-                       &ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => {
-                               3u8.write(w)?;
-                               idx.write(w)?;
-                               secret.write(w)?;
-                       },
-                       &ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => {
-                               4u8.write(w)?;
-                               should_broadcast.write(w)?;
-                       },
-               }
-               Ok(())
-       }
-}
-impl Readable for ChannelMonitorUpdateStep {
-       fn read<R: ::std::io::Read>(r: &mut R) -> Result<Self, DecodeError> {
-               match Readable::read(r)? {
-                       0u8 => {
-                               Ok(ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
-                                       commitment_tx: Readable::read(r)?,
-                                       htlc_outputs: {
-                                               let len: u64 = Readable::read(r)?;
-                                               let mut res = Vec::new();
-                                               for _ in 0..len {
-                                                       res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?));
-                                               }
-                                               res
-                                       },
-                               })
-                       },
-                       1u8 => {
-                               Ok(ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
-                                       unsigned_commitment_tx: Readable::read(r)?,
-                                       commitment_number: Readable::read(r)?,
-                                       their_revocation_point: Readable::read(r)?,
-                                       htlc_outputs: {
-                                               let len: u64 = Readable::read(r)?;
-                                               let mut res = Vec::new();
-                                               for _ in 0..len {
-                                                       res.push((Readable::read(r)?, <Option<HTLCSource> as Readable>::read(r)?.map(|o| Box::new(o))));
-                                               }
-                                               res
-                                       },
-                               })
-                       },
-                       2u8 => {
-                               Ok(ChannelMonitorUpdateStep::PaymentPreimage {
-                                       payment_preimage: Readable::read(r)?,
-                               })
-                       },
-                       3u8 => {
-                               Ok(ChannelMonitorUpdateStep::CommitmentSecret {
-                                       idx: Readable::read(r)?,
-                                       secret: Readable::read(r)?,
-                               })
-                       },
-                       4u8 => {
-                               Ok(ChannelMonitorUpdateStep::ChannelForceClosed {
-                                       should_broadcast: Readable::read(r)?
-                               })
-                       },
-                       _ => Err(DecodeError::InvalidValue),
-               }
-       }
-}
-
-/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
-/// on-chain transactions to ensure no loss of funds occurs.
-///
-/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
-/// information and are actively monitoring the chain.
-///
-/// Pending Events or updated HTLCs which have not yet been read out by
-/// get_and_clear_pending_monitor_events or get_and_clear_pending_events are serialized to disk and
-/// reloaded at deserialize-time. Thus, you must ensure that, when handling events, all events
-/// retrieved are fully handled before re-serializing the new state (see the sketch after the
-/// struct definition below).
-pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
-       latest_update_id: u64,
-       commitment_transaction_number_obscure_factor: u64,
-
-       destination_script: Script,
-       broadcasted_holder_revokable_script: Option<(Script, PublicKey, PublicKey)>,
-       counterparty_payment_script: Script,
-       shutdown_script: Script,
-
-       keys: ChanSigner,
-       funding_info: (OutPoint, Script),
-       current_counterparty_commitment_txid: Option<Txid>,
-       prev_counterparty_commitment_txid: Option<Txid>,
-
-       counterparty_tx_cache: CounterpartyCommitmentTransaction,
-       funding_redeemscript: Script,
-       channel_value_satoshis: u64,
-       // first is the idx of the first of the two revocation points
-       their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
-
-       on_holder_tx_csv: u16,
-
-       commitment_secrets: CounterpartyCommitmentSecrets,
-       counterparty_claimable_outpoints: HashMap<Txid, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
-       /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
-       /// Nor can we figure out their commitment numbers without the commitment transaction they are
-       /// spending. Thus, in order to claim them via revocation key, we track all the counterparty
-       /// commitment transactions which we find on-chain, mapping them to the commitment number which
-       /// can be used to derive the revocation key and claim the transactions.
-       counterparty_commitment_txn_on_chain: HashMap<Txid, (u64, Vec<Script>)>,
-       /// Cache used to make pruning of payment_preimages faster.
-       /// Maps payment_hash values to commitment numbers for non-revoked counterparty transactions
-       /// (ie should remain pretty small).
-       /// Serialized to disk but should generally not be sent to Watchtowers.
-       counterparty_hash_commitment_number: HashMap<PaymentHash, u64>,
-
-       // We store two holder commitment transactions to avoid any race conditions where we may update
-       // some monitors (potentially on watchtowers) but then fail to update others, resulting in the
-       // various monitors for one channel being out of sync, and us broadcasting a holder
-       // transaction for which we have deleted claim information on some watchtowers.
-       prev_holder_signed_commitment_tx: Option<HolderSignedTx>,
-       current_holder_commitment_tx: HolderSignedTx,
-
-       // Used just for ChannelManager to make sure it has the latest channel data during
-       // deserialization
-       current_counterparty_commitment_number: u64,
-       // Used just for ChannelManager to make sure it has the latest channel data during
-       // deserialization
-       current_holder_commitment_number: u64,
-
-       payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
-
-       pending_monitor_events: Vec<MonitorEvent>,
-       pending_events: Vec<Event>,
-
-       // Used to track onchain events, ie transactions that are part of channels confirmed on-chain, on
-       // which we have to take action once they reach enough confs. The key is a block height timer, ie we
-       // enforce actions when we receive a block with the given height. Actions depend on the OnchainEvent type.
-       onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
-
-       // If we get serialized out and re-read, we need to make sure that the chain monitoring
-       // interface knows about the TXOs that we want to be notified of spends of. We could probably
-       // be smart and derive them from the above storage fields, but it's much simpler and more
-       // Obviously Correct (tm) if we just keep track of them explicitly.
-       outputs_to_watch: HashMap<Txid, Vec<Script>>,
-
-       #[cfg(test)]
-       pub onchain_tx_handler: OnchainTxHandler<ChanSigner>,
-       #[cfg(not(test))]
-       onchain_tx_handler: OnchainTxHandler<ChanSigner>,
-
-       // This is set when the Channel[Manager] generated a ChannelMonitorUpdate which indicated the
-       // channel has been force-closed. After this is set, no further holder commitment transaction
-       // updates may occur, and we panic!() if one is provided.
-       lockdown_from_offchain: bool,
-
-       // Set once we've signed a holder commitment transaction and handed it over to our
-       // OnchainTxHandler. After this is set, no future updates to our holder commitment transactions
-       // may occur, and we fail any such monitor updates.
-       //
-       // In case an update is rejected because we have already signed a holder commitment
-       // transaction locally, we nevertheless store the update content so we can track it in case
-       // another remote monitor broadcasts out-of-order with regard to the block view.
-       holder_tx_signed: bool,
-
-       // We simply modify last_block_hash in Channel's block_connected so that serialization is
-       // consistent, with the hope that the users' copy handles block_connected in a consistent way.
-       // (We do *not*, however, update it in update_monitor, to ensure any local user copies keep
-       // their last_block_hash from their own state and not from updated copies that didn't run
-       // through the full block_connected.)
-       last_block_hash: BlockHash,
-       secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
-}
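// Editor's sketch, not part of the original file, of the event-handling ordering required by the
// ChannelMonitor docs above: every pending event must be fully handled before the monitor is
// re-serialized, since handled-but-still-serialized events would be replayed on reload. The
// `handle_event` and `persist` closures are stand-ins for the user's own code.
fn drain_then_persist<ChanSigner, E, P>(monitor: &mut ChannelMonitor<ChanSigner>, mut handle_event: E, persist: P)
	where ChanSigner: ChannelKeys, E: FnMut(Event), P: FnOnce(&ChannelMonitor<ChanSigner>)
{
	for event in monitor.get_and_clear_pending_events() {
		handle_event(event);
	}
	// Only now is it safe to write the monitor (minus the drained events) back to disk.
	persist(monitor);
}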
-
-/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
-/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
-/// events to it, while also taking any add/update_monitor events and passing them to some remote
-/// server(s).
-///
-/// In general, you must always have at least one local copy in memory, which must never fail to
-/// update (as it is responsible for broadcasting the latest state in case the channel is closed),
-/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails
-/// to update (eg out-of-memory or some other condition), you must immediately shut down without
-/// taking any further action such as writing the current state to disk. This should likely be
-/// accomplished via panic!() or abort().
-///
-/// Note that any updates to a channel's monitor *must* be applied to each instance of the
-/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
-/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
-/// which we have revoked, allowing our counterparty to claim all funds in the channel!
-///
-/// Users need to notify implementors of ManyChannelMonitor when a new block is connected or
-/// disconnected, using their `block_connected` and `block_disconnected` methods. However, rather
-/// than calling these methods directly, the user should register implementors as listeners to the
-/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
-/// all registered listeners in one go. (A sketch of the persistence pattern described above
-/// follows the trait definition below.)
-pub trait ManyChannelMonitor: Send + Sync {
-       /// The concrete type which signs for transactions and provides access to our channel public
-       /// keys.
-       type Keys: ChannelKeys;
-
-       /// Adds a monitor for the given `funding_txo`.
-       ///
-       /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
-       /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
-       /// callbacks with the funding transaction, or any spends of it.
-       ///
-       /// Further, the implementer must also ensure that each output returned in
-       /// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about
-       /// any spends of any of the outputs.
-       ///
-       /// Any spends of outputs which should have been registered which aren't passed to
-       /// ChannelMonitors via block_connected may result in FUNDS LOSS.
-       fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr>;
-
-       /// Updates a monitor for the given `funding_txo`.
-       ///
-       /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
-       /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
-       /// callbacks with the funding transaction, or any spends of it.
-       ///
-       /// Further, the implementer must also ensure that each output returned in
-       /// monitor.get_outputs_to_watch() is registered to ensure that the provided monitor learns about
-       /// any spends of any of the outputs.
-       ///
-       /// Any spends of outputs which should have been registered which aren't passed to
-       /// ChannelMonitors via block_connected may result in FUNDS LOSS.
-       ///
-       /// In case of a distributed watchtower deployment, even if an Err is returned, the new version
-       /// must be written to disk, as state may have been stored but rejected due to a block forcing
-       /// a commitment broadcast. This storage is used to claim outputs of a rejected state confirmed
-       /// onchain by another watchtower lagging behind on block processing.
-       fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
-
-       /// Used by ChannelManager to get the list of HTLCs resolved onchain which need to be updated
-       /// with success or failure.
-       ///
-       /// You should probably just call through to
-       /// ChannelMonitor::get_and_clear_pending_monitor_events() for each ChannelMonitor and return
-       /// the full list.
-       fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent>;
-}
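// Editor's sketch, not part of the original file, of the persistence contract described above:
// durably store the new state *before* the in-memory copy moves on, and surface a
// TemporaryFailure if the durable write did not happen. The `persist` closure is a stand-in for
// whatever storage (disk, replicated watchtower, ...) is actually used.
struct PersistingMonitor<M: ManyChannelMonitor, P: Fn(&OutPoint, &ChannelMonitorUpdate) -> Result<(), ()>> {
	inner: M,
	persist: P,
}

impl<M: ManyChannelMonitor, P: Fn(&OutPoint, &ChannelMonitorUpdate) -> Result<(), ()>> PersistingMonitor<M, P> {
	fn update(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
		if (self.persist)(&funding_txo, &update).is_err() {
			// Not durably stored: pause the channel rather than continue with unpersisted state.
			return Err(ChannelMonitorUpdateErr::TemporaryFailure);
		}
		// The in-memory (or inner) copy must then never fail to apply the persisted update.
		self.inner.update_monitor(funding_txo, update)
	}
}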
-
-#[cfg(any(test, feature = "fuzztarget"))]
-/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
-/// underlying object
-impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
-       fn eq(&self, other: &Self) -> bool {
-               if self.latest_update_id != other.latest_update_id ||
-                       self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
-                       self.destination_script != other.destination_script ||
-                       self.broadcasted_holder_revokable_script != other.broadcasted_holder_revokable_script ||
-                       self.counterparty_payment_script != other.counterparty_payment_script ||
-                       self.keys.pubkeys() != other.keys.pubkeys() ||
-                       self.funding_info != other.funding_info ||
-                       self.current_counterparty_commitment_txid != other.current_counterparty_commitment_txid ||
-                       self.prev_counterparty_commitment_txid != other.prev_counterparty_commitment_txid ||
-                       self.counterparty_tx_cache != other.counterparty_tx_cache ||
-                       self.funding_redeemscript != other.funding_redeemscript ||
-                       self.channel_value_satoshis != other.channel_value_satoshis ||
-                       self.their_cur_revocation_points != other.their_cur_revocation_points ||
-                       self.on_holder_tx_csv != other.on_holder_tx_csv ||
-                       self.commitment_secrets != other.commitment_secrets ||
-                       self.counterparty_claimable_outpoints != other.counterparty_claimable_outpoints ||
-                       self.counterparty_commitment_txn_on_chain != other.counterparty_commitment_txn_on_chain ||
-                       self.counterparty_hash_commitment_number != other.counterparty_hash_commitment_number ||
-                       self.prev_holder_signed_commitment_tx != other.prev_holder_signed_commitment_tx ||
-                       self.current_counterparty_commitment_number != other.current_counterparty_commitment_number ||
-                       self.current_holder_commitment_number != other.current_holder_commitment_number ||
-                       self.current_holder_commitment_tx != other.current_holder_commitment_tx ||
-                       self.payment_preimages != other.payment_preimages ||
-                       self.pending_monitor_events != other.pending_monitor_events ||
-                       self.pending_events.len() != other.pending_events.len() || // We trust events to round-trip properly
-                       self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf ||
-                       self.outputs_to_watch != other.outputs_to_watch ||
-                       self.lockdown_from_offchain != other.lockdown_from_offchain ||
-                       self.holder_tx_signed != other.holder_tx_signed
-               {
-                       false
-               } else {
-                       true
-               }
-       }
-}
-
-impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
-       /// Writes this monitor into the given writer, suitable for writing to disk.
-       ///
-       /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
-       /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
-       /// the "reorg path" (ie disconnecting blocks until you find a common ancestor of both the
-       /// returned block hash and the current chain, and then reconnecting blocks to get to the
-       /// best chain) upon deserializing the object!
-       pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
-               //TODO: We still write out all the serialization here manually instead of using the fancy
-               //serialization framework we have, we should migrate things over to it.
-               writer.write_all(&[SERIALIZATION_VERSION; 1])?;
-               writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
-
-               self.latest_update_id.write(writer)?;
-
-               // Set in initial Channel-object creation, so should always be set by now:
-               U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
-
-               self.destination_script.write(writer)?;
-               if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
-                       writer.write_all(&[0; 1])?;
-                       broadcasted_holder_revokable_script.0.write(writer)?;
-                       broadcasted_holder_revokable_script.1.write(writer)?;
-                       broadcasted_holder_revokable_script.2.write(writer)?;
-               } else {
-                       writer.write_all(&[1; 1])?;
-               }
-
-               self.counterparty_payment_script.write(writer)?;
-               self.shutdown_script.write(writer)?;
-
-               self.keys.write(writer)?;
-               writer.write_all(&self.funding_info.0.txid[..])?;
-               writer.write_all(&byte_utils::be16_to_array(self.funding_info.0.index))?;
-               self.funding_info.1.write(writer)?;
-               self.current_counterparty_commitment_txid.write(writer)?;
-               self.prev_counterparty_commitment_txid.write(writer)?;
-
-               self.counterparty_tx_cache.write(writer)?;
-               self.funding_redeemscript.write(writer)?;
-               self.channel_value_satoshis.write(writer)?;
-
-               match self.their_cur_revocation_points {
-                       Some((idx, pubkey, second_option)) => {
-                               writer.write_all(&byte_utils::be48_to_array(idx))?;
-                               writer.write_all(&pubkey.serialize())?;
-                               match second_option {
-                                       Some(second_pubkey) => {
-                                               writer.write_all(&second_pubkey.serialize())?;
-                                       },
-                                       None => {
-                                               writer.write_all(&[0; 33])?;
-                                       },
-                               }
-                       },
-                       None => {
-                               writer.write_all(&byte_utils::be48_to_array(0))?;
-                       },
-               }
-
-               writer.write_all(&byte_utils::be16_to_array(self.on_holder_tx_csv))?;
-
-               self.commitment_secrets.write(writer)?;
-
-               macro_rules! serialize_htlc_in_commitment {
-                       ($htlc_output: expr) => {
-                               writer.write_all(&[$htlc_output.offered as u8; 1])?;
-                               writer.write_all(&byte_utils::be64_to_array($htlc_output.amount_msat))?;
-                               writer.write_all(&byte_utils::be32_to_array($htlc_output.cltv_expiry))?;
-                               writer.write_all(&$htlc_output.payment_hash.0[..])?;
-                               $htlc_output.transaction_output_index.write(writer)?;
-                       }
-               }
-
-               writer.write_all(&byte_utils::be64_to_array(self.counterparty_claimable_outpoints.len() as u64))?;
-               for (ref txid, ref htlc_infos) in self.counterparty_claimable_outpoints.iter() {
-                       writer.write_all(&txid[..])?;
-                       writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?;
-                       for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
-                               serialize_htlc_in_commitment!(htlc_output);
-                               htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?;
-                       }
-               }
-
-               writer.write_all(&byte_utils::be64_to_array(self.counterparty_commitment_txn_on_chain.len() as u64))?;
-               for (ref txid, &(commitment_number, ref txouts)) in self.counterparty_commitment_txn_on_chain.iter() {
-                       writer.write_all(&txid[..])?;
-                       writer.write_all(&byte_utils::be48_to_array(commitment_number))?;
-                       (txouts.len() as u64).write(writer)?;
-                       for script in txouts.iter() {
-                               script.write(writer)?;
-                       }
-               }
-
-               writer.write_all(&byte_utils::be64_to_array(self.counterparty_hash_commitment_number.len() as u64))?;
-               for (ref payment_hash, commitment_number) in self.counterparty_hash_commitment_number.iter() {
-                       writer.write_all(&payment_hash.0[..])?;
-                       writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
-               }
-
-               macro_rules! serialize_holder_tx {
-                       ($holder_tx: expr) => {
-                               $holder_tx.txid.write(writer)?;
-                               writer.write_all(&$holder_tx.revocation_key.serialize())?;
-                               writer.write_all(&$holder_tx.a_htlc_key.serialize())?;
-                               writer.write_all(&$holder_tx.b_htlc_key.serialize())?;
-                               writer.write_all(&$holder_tx.delayed_payment_key.serialize())?;
-                               writer.write_all(&$holder_tx.per_commitment_point.serialize())?;
-
-                               writer.write_all(&byte_utils::be32_to_array($holder_tx.feerate_per_kw))?;
-                               writer.write_all(&byte_utils::be64_to_array($holder_tx.htlc_outputs.len() as u64))?;
-                               for &(ref htlc_output, ref sig, ref htlc_source) in $holder_tx.htlc_outputs.iter() {
-                                       serialize_htlc_in_commitment!(htlc_output);
-                                       if let &Some(ref their_sig) = sig {
-                                               1u8.write(writer)?;
-                                               writer.write_all(&their_sig.serialize_compact())?;
-                                       } else {
-                                               0u8.write(writer)?;
-                                       }
-                                       htlc_source.write(writer)?;
-                               }
-                       }
-               }
-
-               if let Some(ref prev_holder_tx) = self.prev_holder_signed_commitment_tx {
-                       writer.write_all(&[1; 1])?;
-                       serialize_holder_tx!(prev_holder_tx);
-               } else {
-                       writer.write_all(&[0; 1])?;
-               }
-
-               serialize_holder_tx!(self.current_holder_commitment_tx);
-
-               writer.write_all(&byte_utils::be48_to_array(self.current_counterparty_commitment_number))?;
-               writer.write_all(&byte_utils::be48_to_array(self.current_holder_commitment_number))?;
-
-               writer.write_all(&byte_utils::be64_to_array(self.payment_preimages.len() as u64))?;
-               for payment_preimage in self.payment_preimages.values() {
-                       writer.write_all(&payment_preimage.0[..])?;
-               }
-
-               writer.write_all(&byte_utils::be64_to_array(self.pending_monitor_events.len() as u64))?;
-               for event in self.pending_monitor_events.iter() {
-                       match event {
-                               MonitorEvent::HTLCEvent(upd) => {
-                                       0u8.write(writer)?;
-                                       upd.write(writer)?;
-                               },
-                               MonitorEvent::CommitmentTxBroadcasted(_) => 1u8.write(writer)?
-                       }
-               }
-
-               writer.write_all(&byte_utils::be64_to_array(self.pending_events.len() as u64))?;
-               for event in self.pending_events.iter() {
-                       event.write(writer)?;
-               }
-
-               self.last_block_hash.write(writer)?;
-
-               writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
-               for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
-                       writer.write_all(&byte_utils::be32_to_array(**target))?;
-                       writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
-                       for ev in events.iter() {
-                               match *ev {
-                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
-                                               0u8.write(writer)?;
-                                               htlc_update.0.write(writer)?;
-                                               htlc_update.1.write(writer)?;
-                                       },
-                                       OnchainEvent::MaturingOutput { ref descriptor } => {
-                                               1u8.write(writer)?;
-                                               descriptor.write(writer)?;
-                                       },
-                               }
-                       }
-               }
-
-               (self.outputs_to_watch.len() as u64).write(writer)?;
-               for (txid, output_scripts) in self.outputs_to_watch.iter() {
-                       txid.write(writer)?;
-                       (output_scripts.len() as u64).write(writer)?;
-                       for script in output_scripts.iter() {
-                               script.write(writer)?;
-                       }
-               }
-               self.onchain_tx_handler.write(writer)?;
-
-               self.lockdown_from_offchain.write(writer)?;
-               self.holder_tx_signed.write(writer)?;
-
-               Ok(())
-       }
-}
-
-impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
-       pub(super) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey,
-                       on_counterparty_tx_csv: u16, destination_script: &Script, funding_info: (OutPoint, Script),
-                       counterparty_htlc_base_key: &PublicKey, counterparty_delayed_payment_base_key: &PublicKey,
-                       on_holder_tx_csv: u16, funding_redeemscript: Script, channel_value_satoshis: u64,
-                       commitment_transaction_number_obscure_factor: u64,
-                       initial_holder_commitment_tx: HolderCommitmentTransaction) -> ChannelMonitor<ChanSigner> {
-
-               assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
-               let our_channel_close_key_hash = WPubkeyHash::hash(&shutdown_pubkey.serialize());
-               let shutdown_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script();
-               let payment_key_hash = WPubkeyHash::hash(&keys.pubkeys().payment_point.serialize());
-               let counterparty_payment_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_key_hash[..]).into_script();
-
-               let counterparty_tx_cache = CounterpartyCommitmentTransaction { counterparty_delayed_payment_base_key: *counterparty_delayed_payment_base_key, counterparty_htlc_base_key: *counterparty_htlc_base_key, on_counterparty_tx_csv, per_htlc: HashMap::new() };
-
-               let mut onchain_tx_handler = OnchainTxHandler::new(destination_script.clone(), keys.clone(), on_holder_tx_csv);
-
-               let holder_tx_sequence = initial_holder_commitment_tx.unsigned_tx.input[0].sequence as u64;
-               let holder_tx_locktime = initial_holder_commitment_tx.unsigned_tx.lock_time as u64;
-               let holder_commitment_tx = HolderSignedTx {
-                       txid: initial_holder_commitment_tx.txid(),
-                       revocation_key: initial_holder_commitment_tx.keys.revocation_key,
-                       a_htlc_key: initial_holder_commitment_tx.keys.broadcaster_htlc_key,
-                       b_htlc_key: initial_holder_commitment_tx.keys.countersignatory_htlc_key,
-                       delayed_payment_key: initial_holder_commitment_tx.keys.broadcaster_delayed_payment_key,
-                       per_commitment_point: initial_holder_commitment_tx.keys.per_commitment_point,
-                       feerate_per_kw: initial_holder_commitment_tx.feerate_per_kw,
-                       htlc_outputs: Vec::new(), // There are never any HTLCs in the initial commitment transactions
-               };
-               onchain_tx_handler.provide_latest_holder_tx(initial_holder_commitment_tx);
-
-               ChannelMonitor {
-                       latest_update_id: 0,
-                       commitment_transaction_number_obscure_factor,
-
-                       destination_script: destination_script.clone(),
-                       broadcasted_holder_revokable_script: None,
-                       counterparty_payment_script,
-                       shutdown_script,
-
-                       keys,
-                       funding_info,
-                       current_counterparty_commitment_txid: None,
-                       prev_counterparty_commitment_txid: None,
-
-                       counterparty_tx_cache,
-                       funding_redeemscript,
-                       channel_value_satoshis: channel_value_satoshis,
-                       their_cur_revocation_points: None,
-
-                       on_holder_tx_csv,
-
-                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
-                       counterparty_claimable_outpoints: HashMap::new(),
-                       counterparty_commitment_txn_on_chain: HashMap::new(),
-                       counterparty_hash_commitment_number: HashMap::new(),
-
-                       prev_holder_signed_commitment_tx: None,
-                       current_holder_commitment_tx: holder_commitment_tx,
-                       current_counterparty_commitment_number: 1 << 48,
-                       current_holder_commitment_number: 0xffff_ffff_ffff - ((((holder_tx_sequence & 0xffffff) << 3*8) | (holder_tx_locktime as u64 & 0xffffff)) ^ commitment_transaction_number_obscure_factor),
-
-                       payment_preimages: HashMap::new(),
-                       pending_monitor_events: Vec::new(),
-                       pending_events: Vec::new(),
-
-                       onchain_events_waiting_threshold_conf: HashMap::new(),
-                       outputs_to_watch: HashMap::new(),
-
-                       onchain_tx_handler,
-
-                       lockdown_from_offchain: false,
-                       holder_tx_signed: false,
-
-                       last_block_hash: Default::default(),
-                       secp_ctx: Secp256k1::new(),
-               }
-       }
-
-       /// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
-       /// neither needed by holder commitment transaction HTLCs nor by counterparty ones. Once we have seen a
-       /// counterparty commitment transaction's secret, its preimages are de facto pruned (we can use the revocation key).
-       pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
-               if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
-                       return Err(MonitorUpdateError("Previous secret did not match new one"));
-               }
-
-               // Prune HTLCs from the previous counterparty commitment tx so we don't generate failure/fulfill
-               // events for now-revoked/fulfilled HTLCs.
-               if let Some(txid) = self.prev_counterparty_commitment_txid.take() {
-                       for &mut (_, ref mut source) in self.counterparty_claimable_outpoints.get_mut(&txid).unwrap() {
-                               *source = None;
-                       }
-               }
-
-               if !self.payment_preimages.is_empty() {
-                       let cur_holder_signed_commitment_tx = &self.current_holder_commitment_tx;
-                       let prev_holder_signed_commitment_tx = self.prev_holder_signed_commitment_tx.as_ref();
-                       let min_idx = self.get_min_seen_secret();
-                       let counterparty_hash_commitment_number = &mut self.counterparty_hash_commitment_number;
-
-                       self.payment_preimages.retain(|&k, _| {
-                               for &(ref htlc, _, _) in cur_holder_signed_commitment_tx.htlc_outputs.iter() {
-                                       if k == htlc.payment_hash {
-                                               return true
-                                       }
-                               }
-                               if let Some(prev_holder_commitment_tx) = prev_holder_signed_commitment_tx {
-                                       for &(ref htlc, _, _) in prev_holder_commitment_tx.htlc_outputs.iter() {
-                                               if k == htlc.payment_hash {
-                                                       return true
-                                               }
-                                       }
-                               }
-                               let contains = if let Some(cn) = counterparty_hash_commitment_number.get(&k) {
-                                       if *cn < min_idx {
-                                               return true
-                                       }
-                                       true
-                               } else { false };
-                               if contains {
-                                       counterparty_hash_commitment_number.remove(&k);
-                               }
-                               false
-                       });
-               }
-
-               Ok(())
-       }
-
-       /// Informs this monitor of the latest counterparty (ie non-broadcastable) commitment transaction.
-       /// The monitor watches for it to be broadcasted and then uses the HTLC information (and
-       /// possibly future revocation/preimage information) to claim outputs where possible.
-       /// We also cache the payment_hash-to-commitment-number mapping to lighten pruning of old preimages by watchtowers.
-       pub(super) fn provide_latest_counterparty_commitment_tx_info<L: Deref>(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey, logger: &L) where L::Target: Logger {
-               // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
-               // so that a remote monitor doesn't learn anything unless there is a malicious close.
-               // (only maybe, sadly we cant do the same for local info, as we need to be aware of
-               // timeouts)
-               for &(ref htlc, _) in &htlc_outputs {
-                       self.counterparty_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
-               }
-
-               let new_txid = unsigned_commitment_tx.txid();
-               log_trace!(logger, "Tracking new counterparty commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len());
-               log_trace!(logger, "New potential counterparty commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx));
-               self.prev_counterparty_commitment_txid = self.current_counterparty_commitment_txid.take();
-               self.current_counterparty_commitment_txid = Some(new_txid);
-               self.counterparty_claimable_outpoints.insert(new_txid, htlc_outputs.clone());
-               self.current_counterparty_commitment_number = commitment_number;
-               //TODO: Merge this into the other per-counterparty-transaction output storage stuff
-               match self.their_cur_revocation_points {
-                       Some(old_points) => {
-                               if old_points.0 == commitment_number + 1 {
-                                       self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
-                               } else if old_points.0 == commitment_number + 2 {
-                                       if let Some(old_second_point) = old_points.2 {
-                                               self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
-                                       } else {
-                                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
-                                       }
-                               } else {
-                                       self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
-                               }
-                       },
-                       None => {
-                               self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
-                       }
-               }
-               let mut htlcs = Vec::with_capacity(htlc_outputs.len());
-               for htlc in htlc_outputs {
-                       if htlc.0.transaction_output_index.is_some() {
-                               htlcs.push(htlc.0);
-                       }
-               }
-               self.counterparty_tx_cache.per_htlc.insert(new_txid, htlcs);
-       }
-
-       /// Informs this monitor of the latest holder (i.e. broadcastable) commitment transaction. The
-       /// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
-       /// is important that any clones of this channel monitor (including remote clones) be kept
-       /// up-to-date as our holder commitment transaction is updated.
-       /// Panics if set_on_holder_tx_csv has never been called.
-       pub(super) fn provide_latest_holder_commitment_tx_info(&mut self, commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
-               let txid = commitment_tx.txid();
-               let sequence = commitment_tx.unsigned_tx.input[0].sequence as u64;
-               let locktime = commitment_tx.unsigned_tx.lock_time as u64;
-               let mut new_holder_commitment_tx = HolderSignedTx {
-                       txid,
-                       revocation_key: commitment_tx.keys.revocation_key,
-                       a_htlc_key: commitment_tx.keys.broadcaster_htlc_key,
-                       b_htlc_key: commitment_tx.keys.countersignatory_htlc_key,
-                       delayed_payment_key: commitment_tx.keys.broadcaster_delayed_payment_key,
-                       per_commitment_point: commitment_tx.keys.per_commitment_point,
-                       feerate_per_kw: commitment_tx.feerate_per_kw,
-                       htlc_outputs: htlc_outputs,
-               };
-               self.onchain_tx_handler.provide_latest_holder_tx(commitment_tx);
-               self.current_holder_commitment_number = 0xffff_ffff_ffff - ((((sequence & 0xffffff) << 3*8) | (locktime as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
-               mem::swap(&mut new_holder_commitment_tx, &mut self.current_holder_commitment_tx);
-               self.prev_holder_signed_commitment_tx = Some(new_holder_commitment_tx);
-               if self.holder_tx_signed {
-                       return Err(MonitorUpdateError("Latest holder commitment transaction has already been signed, update is rejected"));
-               }
-               Ok(())
-       }
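
As a worked example of the obscured commitment number arithmetic used just above (and again in check_spend_counterparty_transaction below), here is a small self-contained sketch. Per BOLT 3 the 48-bit commitment index, XORed with a per-channel obscure factor, is hidden in the low 24 bits of the input's nSequence and of nLockTime; LDK then counts commitment numbers down from 2^48 - 1, hence the subtraction. Names here are illustrative, not LDK's.

    fn recover_commitment_number(sequence: u32, lock_time: u32, obscure_factor: u64) -> u64 {
        // Low 24 bits of nSequence form the upper half, low 24 bits of nLockTime the lower half.
        let obscured = ((sequence as u64 & 0xff_ffff) << 24) | (lock_time as u64 & 0xff_ffff);
        0xffff_ffff_ffff - (obscured ^ obscure_factor)
    }

    // With an obscure factor of zero, the very first commitment (BOLT index 0, nSequence top
    // byte 0x80, nLockTime top byte 0x20) maps to 0xffff_ffff_ffff in this counting-down scheme:
    // assert_eq!(recover_commitment_number(0x8000_0000, 0x2000_0000, 0), 0xffff_ffff_ffff);
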
-
-       /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
-       /// commitment_tx_infos which contain the payment hash have been revoked.
-       pub(super) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
-               self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
-       }
-
-       pub(super) fn broadcast_latest_holder_commitment_txn<B: Deref, L: Deref>(&mut self, broadcaster: &B, logger: &L)
-               where B::Target: BroadcasterInterface,
-                                       L::Target: Logger,
-       {
-               for tx in self.get_latest_holder_commitment_txn(logger).iter() {
-                       broadcaster.broadcast_transaction(tx);
-               }
-               self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
-       }
-
-       /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
-       /// itself.
-       ///
-       /// Panics if the given update is not the next update by update_id.
-       pub fn update_monitor<B: Deref, L: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B, logger: &L) -> Result<(), MonitorUpdateError>
-               where B::Target: BroadcasterInterface,
-                                       L::Target: Logger,
-       {
-               if self.latest_update_id + 1 != updates.update_id {
-                       panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
-               }
-               for update in updates.updates.drain(..) {
-                       match update {
-                               ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs } => {
-                                       if self.lockdown_from_offchain { panic!(); }
-                                       self.provide_latest_holder_commitment_tx_info(commitment_tx, htlc_outputs)?
-                               },
-                               ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
-                                       self.provide_latest_counterparty_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point, logger),
-                               ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
-                                       self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
-                               ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
-                                       self.provide_secret(idx, secret)?,
-                               ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
-                                       self.lockdown_from_offchain = true;
-                                       if should_broadcast {
-                                               self.broadcast_latest_holder_commitment_txn(broadcaster, logger);
-                                       } else {
-                                               log_error!(logger, "You have a toxic holder commitment transaction available in channel monitor, see the comment in ChannelMonitor::get_latest_holder_commitment_txn for the manual action to take");
-                                       }
-                               }
-                       }
-               }
-               self.latest_update_id = updates.update_id;
-               Ok(())
-       }
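
A minimal sketch of the ordering contract update_monitor enforces: updates must arrive strictly in sequence by update_id, and the stored id only advances once every step has been applied. The Monitor and Update structs here are illustrative stand-ins, not LDK types.

    struct Monitor { latest_update_id: u64 }
    struct Update { update_id: u64 }

    impl Monitor {
        fn apply(&mut self, update: &Update) {
            // Same check as update_monitor: reject anything but the immediate successor.
            assert_eq!(self.latest_update_id + 1, update.update_id,
                "ChannelMonitorUpdates must be applied in order by update_id");
            // ... apply each ChannelMonitorUpdateStep here ...
            self.latest_update_id = update.update_id;
        }
    }
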
-
-       /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
-       /// ChannelMonitor.
-       pub fn get_latest_update_id(&self) -> u64 {
-               self.latest_update_id
-       }
-
-       /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
-       pub fn get_funding_txo(&self) -> &(OutPoint, Script) {
-               &self.funding_info
-       }
-
-       /// Gets a list of txids, with their output scripts (in the order they appear in the
-       /// transaction), which we must learn about spends of via block_connected().
-       ///
-       /// (C-not exported) because we have no HashMap bindings
-       pub fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<Script>> {
-               &self.outputs_to_watch
-       }
-
-       /// Gets the set of all outpoints which this ChannelMonitor expects to hear about spends of.
-       /// Generally useful when deserializing as during normal operation the return values of
-       /// block_connected are sufficient to ensure all relevant outpoints are being monitored (note
-       /// that the get_funding_txo outpoint and transaction must also be monitored for!).
-       ///
-       /// (C-not exported) as there is no practical way to track lifetimes of returned values.
-       pub fn get_monitored_outpoints(&self) -> Vec<(Txid, u32, &Script)> {
-               let mut res = Vec::with_capacity(self.counterparty_commitment_txn_on_chain.len() * 2);
-               for (ref txid, &(_, ref outputs)) in self.counterparty_commitment_txn_on_chain.iter() {
-                       for (idx, output) in outputs.iter().enumerate() {
-                               res.push(((*txid).clone(), idx as u32, output));
-                       }
-               }
-               res
-       }
-
-       /// Gets the list of HTLCs whose status has been updated on chain. This should be called by
-       /// ChannelManager via ManyChannelMonitor::get_and_clear_pending_monitor_events().
-       pub fn get_and_clear_pending_monitor_events(&mut self) -> Vec<MonitorEvent> {
-               let mut ret = Vec::new();
-               mem::swap(&mut ret, &mut self.pending_monitor_events);
-               ret
-       }
-
-       /// Gets the list of pending events which were generated by previous actions, clearing the list
-       /// in the process.
-       ///
-       /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
-       /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
-       /// no internal locking in ChannelMonitors.
-       pub fn get_and_clear_pending_events(&mut self) -> Vec<Event> {
-               let mut ret = Vec::new();
-               mem::swap(&mut ret, &mut self.pending_events);
-               ret
-       }
-
-       /// Can only fail if idx is < get_min_seen_secret
-       pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
-               self.commitment_secrets.get_secret(idx)
-       }
-
-       pub(super) fn get_min_seen_secret(&self) -> u64 {
-               self.commitment_secrets.get_min_seen_secret()
-       }
-
-       pub(super) fn get_cur_counterparty_commitment_number(&self) -> u64 {
-               self.current_counterparty_commitment_number
-       }
-
-       pub(super) fn get_cur_holder_commitment_number(&self) -> u64 {
-               self.current_holder_commitment_number
-       }
-
-       /// Attempts to claim a counterparty commitment transaction's outputs using the revocation key and
-       /// data in counterparty_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
-       /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
-       /// HTLC-Success/HTLC-Timeout transactions.
-       /// Returns updates for HTLCs pending in the channel which are failed automatically by the
-       /// broadcast of a revoked counterparty commitment tx.
-       fn check_spend_counterparty_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
-               // Most secp and related errors trying to create keys mean we have no hope of constructing
-               // a spend transaction...so we return no transactions to broadcast
-               let mut claimable_outpoints = Vec::new();
-               let mut watch_outputs = Vec::new();
-
-               let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
-               let per_commitment_option = self.counterparty_claimable_outpoints.get(&commitment_txid);
-
-               macro_rules! ignore_error {
-                       ( $thing : expr ) => {
-                               match $thing {
-                                       Ok(a) => a,
-                                       Err(_) => return (claimable_outpoints, (commitment_txid, watch_outputs))
-                               }
-                       };
-               }
-
-               let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
-               if commitment_number >= self.get_min_seen_secret() {
-                       let secret = self.get_secret(commitment_number).unwrap();
-                       let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
-                       let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
-                       let revocation_pubkey = ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &self.keys.pubkeys().revocation_basepoint));
-                       let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.counterparty_tx_cache.counterparty_delayed_payment_base_key));
-
-                       let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.counterparty_tx_cache.on_counterparty_tx_csv, &delayed_key);
-                       let revokeable_p2wsh = revokeable_redeemscript.to_v0_p2wsh();
-
-                       // First, process non-htlc outputs (to_holder & to_counterparty)
-                       for (idx, outp) in tx.output.iter().enumerate() {
-                               if outp.script_pubkey == revokeable_p2wsh {
-                                       let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: outp.value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
-                                       claimable_outpoints.push(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 }, witness_data});
-                               }
-                       }
-
-                       // Then, try to find revoked htlc outputs
-                       if let Some(ref per_commitment_data) = per_commitment_option {
-                               for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
-                                       if let Some(transaction_output_index) = htlc.transaction_output_index {
-                                               if transaction_output_index as usize >= tx.output.len() ||
-                                                               tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
-                                                       return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, nothing we can do but bail out
-                                               }
-                                               let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, per_commitment_key, input_descriptor: if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC }, amount: tx.output[transaction_output_index as usize].value, htlc: Some(htlc.clone()), on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv};
-                                               claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable: true, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
-                                       }
-                               }
-                       }
-
-                       // Lastly, track the on-chain revoked commitment transaction and fail backward outgoing HTLCs, as the payment path is broken
-                       if !claimable_outpoints.is_empty() || per_commitment_option.is_some() { // ie we're confident this is actually ours
-                               // We're definitely a counterparty commitment transaction!
-                               log_trace!(logger, "Got broadcast of revoked counterparty commitment transaction, going to generate general spend tx with {} inputs", claimable_outpoints.len());
-                               watch_outputs.append(&mut tx.output.clone());
-                               self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
-
-                               macro_rules! check_htlc_fails {
-                                       ($txid: expr, $commitment_tx: expr) => {
-                                               if let Some(ref outpoints) = self.counterparty_claimable_outpoints.get($txid) {
-                                                       for &(ref htlc, ref source_option) in outpoints.iter() {
-                                                               if let &Some(ref source) = source_option {
-                                                                       log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
-                                                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-                                                                               hash_map::Entry::Occupied(mut entry) => {
-                                                                                       let e = entry.get_mut();
-                                                                                       e.retain(|ref event| {
-                                                                                               match **event {
-                                                                                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
-                                                                                                               return htlc_update.0 != **source
-                                                                                                       },
-                                                                                                       _ => true
-                                                                                               }
-                                                                                       });
-                                                                                       e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
-                                                                               }
-                                                                               hash_map::Entry::Vacant(entry) => {
-                                                                                       entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
-                                                                               }
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                               if let Some(ref txid) = self.current_counterparty_commitment_txid {
-                                       check_htlc_fails!(txid, "current");
-                               }
-                               if let Some(ref txid) = self.prev_counterparty_commitment_txid {
-                                       check_htlc_fails!(txid, "previous");
-                               }
-                               // No need to check holder commitment txn, symmetric HTLCSource must be present as per-htlc data on counterparty commitment tx
-                       }
-               } else if let Some(per_commitment_data) = per_commitment_option {
-                       // While this isn't useful yet, there is a potential race where if a counterparty
-                       // revokes a state at the same time as the commitment transaction for that state is
-                       // confirmed, and the watchtower receives the block before the user, the user could
-                       // upload a new ChannelMonitor with the revocation secret but the watchtower has
-                       // already processed the block, resulting in the counterparty_commitment_txn_on_chain entry
-                       // not being generated by the above conditional. Thus, to be safe, we go ahead and
-                       // insert it here.
-                       watch_outputs.append(&mut tx.output.clone());
-                       self.counterparty_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
-
-                       log_trace!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
-
-                       macro_rules! check_htlc_fails {
-                               ($txid: expr, $commitment_tx: expr, $id: tt) => {
-                                       if let Some(ref latest_outpoints) = self.counterparty_claimable_outpoints.get($txid) {
-                                               $id: for &(ref htlc, ref source_option) in latest_outpoints.iter() {
-                                                       if let &Some(ref source) = source_option {
-                                                               // Check if the HTLC is present in the commitment transaction that was
-                                                               // broadcast, but not if it was below the dust limit, which we should
-                                                               // fail backwards immediately as there is no way for us to learn the
-                                                               // payment_preimage.
-                                                               // Note that if the dust limit were allowed to change between
-                                                               // commitment transactions we'd want to check whether *any*
-                                                               // broadcastable commitment transaction has the HTLC in it, but it
-                                                               // cannot currently change after channel initialization, so we don't
-                                                               // need to here.
-                                                               for &(ref broadcast_htlc, ref broadcast_source) in per_commitment_data.iter() {
-                                                                       if broadcast_htlc.transaction_output_index.is_some() && Some(source) == broadcast_source.as_ref() {
-                                                                               continue $id;
-                                                                       }
-                                                               }
-                                                               log_trace!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of counterparty commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
-                                                               match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-                                                                       hash_map::Entry::Occupied(mut entry) => {
-                                                                               let e = entry.get_mut();
-                                                                               e.retain(|ref event| {
-                                                                                       match **event {
-                                                                                               OnchainEvent::HTLCUpdate { ref htlc_update } => {
-                                                                                                       return htlc_update.0 != **source
-                                                                                               },
-                                                                                               _ => true
-                                                                                       }
-                                                                               });
-                                                                               e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
-                                                                       }
-                                                                       hash_map::Entry::Vacant(entry) => {
-                                                                               entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-                       if let Some(ref txid) = self.current_counterparty_commitment_txid {
-                               check_htlc_fails!(txid, "current", 'current_loop);
-                       }
-                       if let Some(ref txid) = self.prev_counterparty_commitment_txid {
-                               check_htlc_fails!(txid, "previous", 'prev_loop);
-                       }
-
-                       if let Some(revocation_points) = self.their_cur_revocation_points {
-                               let revocation_point_option =
-                                       if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
-                                       else if let Some(point) = revocation_points.2.as_ref() {
-                                               if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
-                                       } else { None };
-                               if let Some(revocation_point) = revocation_point_option {
-                                       self.counterparty_payment_script = {
-                                               // Note that the Network here is ignored as we immediately drop the address for the
-                                               // script_pubkey version
-                                               let payment_hash160 = WPubkeyHash::hash(&self.keys.pubkeys().payment_point.serialize());
-                                               Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script()
-                                       };
-
-                                       // Then, try to find htlc outputs
-                                       for (_, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
-                                               if let Some(transaction_output_index) = htlc.transaction_output_index {
-                                                       if transaction_output_index as usize >= tx.output.len() ||
-                                                                       tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 {
-                                                               return (claimable_outpoints, (commitment_txid, watch_outputs)); // Corrupted per_commitment_data, nothing we can do but bail out
-                                                       }
-                                                       let preimage = if htlc.offered { if let Some(p) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) } else { None } } else { None };
-                                                       let aggregable = htlc.offered;
-                                                       if preimage.is_some() || !htlc.offered {
-                                                               let witness_data = InputMaterial::CounterpartyHTLC { per_commitment_point: *revocation_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key, preimage, htlc: htlc.clone() };
-                                                               claimable_outpoints.push(ClaimRequest { absolute_timelock: htlc.cltv_expiry, aggregable, outpoint: BitcoinOutPoint { txid: commitment_txid, vout: transaction_output_index }, witness_data });
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-               }
-               (claimable_outpoints, (commitment_txid, watch_outputs))
-       }
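
The per-HTLC sanity check above (the recorded output index must exist and that output's value in sats must equal amount_msat / 1000) can be isolated as a small self-contained sketch; the Lite types below are stand-ins for the bitcoin/LDK ones.

    struct TxOutLite { value_sat: u64 }
    struct HtlcLite { amount_msat: u64, transaction_output_index: Option<u32> }

    // Returns true only if the HTLC was materialized as an output and that output's value
    // matches the HTLC amount rounded down to whole sats.
    fn htlc_output_matches(outputs: &[TxOutLite], htlc: &HtlcLite) -> bool {
        match htlc.transaction_output_index {
            Some(idx) => outputs.get(idx as usize)
                .map(|o| o.value_sat == htlc.amount_msat / 1000)
                .unwrap_or(false),
            None => false, // dust HTLC: no corresponding output to claim
        }
    }
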
-
-       /// Attempts to claim a counterparty HTLC-Success/HTLC-Timeout's outputs using the revocation key
-       fn check_spend_counterparty_htlc<L: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, logger: &L) -> (Vec<ClaimRequest>, Option<(Txid, Vec<TxOut>)>) where L::Target: Logger {
-               let htlc_txid = tx.txid();
-               if tx.input.len() != 1 || tx.output.len() != 1 || tx.input[0].witness.len() != 5 {
-                       return (Vec::new(), None)
-               }
-
-               macro_rules! ignore_error {
-                       ( $thing : expr ) => {
-                               match $thing {
-                                       Ok(a) => a,
-                                       Err(_) => return (Vec::new(), None)
-                               }
-                       };
-               }
-
-               let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); };
-               let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
-               let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
-
-               log_trace!(logger, "Counterparty HTLC broadcast {}:{}", htlc_txid, 0);
-               let witness_data = InputMaterial::Revoked { per_commitment_point, counterparty_delayed_payment_base_key: self.counterparty_tx_cache.counterparty_delayed_payment_base_key, counterparty_htlc_base_key: self.counterparty_tx_cache.counterparty_htlc_base_key,  per_commitment_key, input_descriptor: InputDescriptors::RevokedOutput, amount: tx.output[0].value, htlc: None, on_counterparty_tx_csv: self.counterparty_tx_cache.on_counterparty_tx_csv };
-               let claimable_outpoints = vec!(ClaimRequest { absolute_timelock: height + self.counterparty_tx_cache.on_counterparty_tx_csv as u32, aggregable: true, outpoint: BitcoinOutPoint { txid: htlc_txid, vout: 0}, witness_data });
-               (claimable_outpoints, Some((htlc_txid, tx.output.clone())))
-       }
-
-       fn broadcast_by_holder_state(&self, commitment_tx: &Transaction, holder_tx: &HolderSignedTx) -> (Vec<ClaimRequest>, Vec<TxOut>, Option<(Script, PublicKey, PublicKey)>) {
-               let mut claim_requests = Vec::with_capacity(holder_tx.htlc_outputs.len());
-               let mut watch_outputs = Vec::with_capacity(holder_tx.htlc_outputs.len());
-
-               let redeemscript = chan_utils::get_revokeable_redeemscript(&holder_tx.revocation_key, self.on_holder_tx_csv, &holder_tx.delayed_payment_key);
-               let broadcasted_holder_revokable_script = Some((redeemscript.to_v0_p2wsh(), holder_tx.per_commitment_point.clone(), holder_tx.revocation_key.clone()));
-
-               for &(ref htlc, _, _) in holder_tx.htlc_outputs.iter() {
-                       if let Some(transaction_output_index) = htlc.transaction_output_index {
-                               claim_requests.push(ClaimRequest { absolute_timelock: ::std::u32::MAX, aggregable: false, outpoint: BitcoinOutPoint { txid: holder_tx.txid, vout: transaction_output_index as u32 },
-                                       witness_data: InputMaterial::HolderHTLC {
-                                               preimage: if !htlc.offered {
-                                                               if let Some(preimage) = self.payment_preimages.get(&htlc.payment_hash) {
-                                                                       Some(preimage.clone())
-                                                               } else {
-                                                                       // We can't build an HTLC-Success transaction without the preimage
-                                                                       continue;
-                                                               }
-                                                       } else { None },
-                                               amount: htlc.amount_msat,
-                               }});
-                               watch_outputs.push(commitment_tx.output[transaction_output_index as usize].clone());
-                       }
-               }
-
-               (claim_requests, watch_outputs, broadcasted_holder_revokable_script)
-       }
-
-       /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
-       /// revoked using data in holder_claimable_outpoints.
-       /// Should not be used if check_spend_counterparty_transaction succeeds.
-       fn check_spend_holder_transaction<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) -> (Vec<ClaimRequest>, (Txid, Vec<TxOut>)) where L::Target: Logger {
-               let commitment_txid = tx.txid();
-               let mut claim_requests = Vec::new();
-               let mut watch_outputs = Vec::new();
-
-               macro_rules! wait_threshold_conf {
-                       ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-                               log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting for confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
-                               match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
-                                       hash_map::Entry::Occupied(mut entry) => {
-                                               let e = entry.get_mut();
-                                               e.retain(|ref event| {
-                                                       match **event {
-                                                               OnchainEvent::HTLCUpdate { ref htlc_update } => {
-                                                                       return htlc_update.0 != $source
-                                                               },
-                                                               _ => true
-                                                       }
-                                               });
-                                               e.push(OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)});
-                                       }
-                                       hash_map::Entry::Vacant(entry) => {
-                                               entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)}]);
-                                       }
-                               }
-                       }
-               }
-
-               macro_rules! append_onchain_update {
-                       ($updates: expr) => {
-                               claim_requests = $updates.0;
-                               watch_outputs.append(&mut $updates.1);
-                               self.broadcasted_holder_revokable_script = $updates.2;
-                       }
-               }
-
-               // The HTLC set may differ between the latest and previous holder commitment txn; in case one of them hits the chain, ensure we cancel all HTLCs backward
-               let mut is_holder_tx = false;
-
-               if self.current_holder_commitment_tx.txid == commitment_txid {
-                       is_holder_tx = true;
-                       log_trace!(logger, "Got latest holder commitment tx broadcast, searching for available HTLCs to claim");
-                       let mut res = self.broadcast_by_holder_state(tx, &self.current_holder_commitment_tx);
-                       append_onchain_update!(res);
-               } else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
-                       if holder_tx.txid == commitment_txid {
-                               is_holder_tx = true;
-                               log_trace!(logger, "Got previous holder commitment tx broadcast, searching for available HTLCs to claim");
-                               let mut res = self.broadcast_by_holder_state(tx, holder_tx);
-                               append_onchain_update!(res);
-                       }
-               }
-
-               macro_rules! fail_dust_htlcs_after_threshold_conf {
-                       ($holder_tx: expr) => {
-                               for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs {
-                                       if htlc.transaction_output_index.is_none() {
-                                               if let &Some(ref source) = source {
-                                                       wait_threshold_conf!(height, source.clone(), "latest", htlc.payment_hash.clone());
-                                               }
-                                       }
-                               }
-                       }
-               }
-
-               if is_holder_tx {
-                       fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx);
-                       if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
-                               fail_dust_htlcs_after_threshold_conf!(holder_tx);
-                       }
-               }
-
-               (claim_requests, (commitment_txid, watch_outputs))
-       }
-
-       /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of
-       /// the Channel was out-of-date. You may also use it to get a broadcastable holder toxic tx in the
-       /// fallen-behind case, i.e. when receiving a channel_reestablish with a proof that our counterparty
-       /// knows a higher revocation secret than the holder commitment number we are aware of. Broadcasting
-       /// these transactions is UNSAFE, as they allow the counterparty to punish you. Nevertheless you may
-       /// want to broadcast them if the counterparty doesn't close the channel with its higher commitment
-       /// transaction after a substantial amount of time (a month or even a year) in order to get your
-       /// funds back. It may be best to contact the other node operator out-of-band to coordinate, if that
-       /// option is available to you. In any case, the choice is up to the user.
-       pub fn get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
-               log_trace!(logger, "Getting signed latest holder commitment transaction!");
-               self.holder_tx_signed = true;
-               if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
-                       let txid = commitment_tx.txid();
-                       let mut res = vec![commitment_tx];
-                       for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
-                               if let Some(vout) = htlc.0.transaction_output_index {
-                                       let preimage = if !htlc.0.offered {
-                                                       if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
-                                                               // We can't build an HTLC-Success transaction without the preimage
-                                                               continue;
-                                                       }
-                                               } else { None };
-                                       if let Some(htlc_tx) = self.onchain_tx_handler.get_fully_signed_htlc_tx(
-                                                       &::bitcoin::OutPoint { txid, vout }, &preimage) {
-                                               res.push(htlc_tx);
-                                       }
-                               }
-                       }
-                       // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
-                       // The data will be re-generated and tracked in check_spend_holder_transaction if we get a confirmation.
-                       return res
-               }
-               Vec::new()
-       }
-
-       /// Unsafe test-only version of get_latest_holder_commitment_txn used by our test framework
-       /// to bypass HolderCommitmentTransaction state update lockdown after signature and generate a
-       /// revoked commitment transaction.
-       #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       pub fn unsafe_get_latest_holder_commitment_txn<L: Deref>(&mut self, logger: &L) -> Vec<Transaction> where L::Target: Logger {
-               log_trace!(logger, "Getting signed copy of latest holder commitment transaction!");
-               if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_copy_holder_tx(&self.funding_redeemscript) {
-                       let txid = commitment_tx.txid();
-                       let mut res = vec![commitment_tx];
-                       for htlc in self.current_holder_commitment_tx.htlc_outputs.iter() {
-                               if let Some(vout) = htlc.0.transaction_output_index {
-                                       let preimage = if !htlc.0.offered {
-                                                       if let Some(preimage) = self.payment_preimages.get(&htlc.0.payment_hash) { Some(preimage.clone()) } else {
-                                                               // We can't build an HTLC-Success transaction without the preimage
-                                                               continue;
-                                                       }
-                                               } else { None };
-                                       if let Some(htlc_tx) = self.onchain_tx_handler.unsafe_get_fully_signed_htlc_tx(
-                                                       &::bitcoin::OutPoint { txid, vout }, &preimage) {
-                                               res.push(htlc_tx);
-                                       }
-                               }
-                       }
-                       return res
-               }
-               Vec::new()
-       }
-
-       /// Called by SimpleManyChannelMonitor::block_connected, which implements
-       /// ChainListener::block_connected.
-       /// Eventually this should be pub and, roughly, implement ChainListener; however, it requires
-       /// &mut self and returns new spendable outputs and outpoints to watch for spends of on-chain.
-       fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)-> Vec<(Txid, Vec<TxOut>)>
-               where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                                       L::Target: Logger,
-       {
-               for tx in txn_matched {
-                       let mut output_val = 0;
-                       for out in tx.output.iter() {
-                               if out.value > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
-                               output_val += out.value;
-                               if output_val > 21_000_000_0000_0000 { panic!("Value-overflowing transaction provided to block connected"); }
-                       }
-               }
-
-               log_trace!(logger, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
-               let mut watch_outputs = Vec::new();
-               let mut claimable_outpoints = Vec::new();
-               for tx in txn_matched {
-                       if tx.input.len() == 1 {
-                               // Assuming our keys were not leaked (in which case we're screwed no matter what),
-                               // commitment transactions and HTLC transactions will all only ever have one input,
-                               // which is an easy way to filter out any potential non-matching txn for lazy
-                               // filters.
-                               let prevout = &tx.input[0].previous_output;
-                               if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 {
-                                       if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
-                                               let (mut new_outpoints, new_outputs) = self.check_spend_counterparty_transaction(&tx, height, &logger);
-                                               if !new_outputs.1.is_empty() {
-                                                       watch_outputs.push(new_outputs);
-                                               }
-                                               if new_outpoints.is_empty() {
-                                                       let (mut new_outpoints, new_outputs) = self.check_spend_holder_transaction(&tx, height, &logger);
-                                                       if !new_outputs.1.is_empty() {
-                                                               watch_outputs.push(new_outputs);
-                                                       }
-                                                       claimable_outpoints.append(&mut new_outpoints);
-                                               }
-                                               claimable_outpoints.append(&mut new_outpoints);
-                                       }
-                               } else {
-                                       if let Some(&(commitment_number, _)) = self.counterparty_commitment_txn_on_chain.get(&prevout.txid) {
-                                               let (mut new_outpoints, new_outputs_option) = self.check_spend_counterparty_htlc(&tx, commitment_number, height, &logger);
-                                               claimable_outpoints.append(&mut new_outpoints);
-                                               if let Some(new_outputs) = new_outputs_option {
-                                                       watch_outputs.push(new_outputs);
-                                               }
-                                       }
-                               }
-                       }
-                       // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
-                       // can also be resolved in a few other ways which can have more than one input. Thus,
-                       // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
-                       self.is_resolving_htlc_output(&tx, height, &logger);
-
-                       self.is_paying_spendable_output(&tx, height, &logger);
-               }
-               let should_broadcast = self.would_broadcast_at_height(height, &logger);
-               if should_broadcast {
-                       claimable_outpoints.push(ClaimRequest { absolute_timelock: height, aggregable: false, outpoint: BitcoinOutPoint { txid: self.funding_info.0.txid.clone(), vout: self.funding_info.0.index as u32 }, witness_data: InputMaterial::Funding { funding_redeemscript: self.funding_redeemscript.clone() }});
-               }
-               if should_broadcast {
-                       self.pending_monitor_events.push(MonitorEvent::CommitmentTxBroadcasted(self.funding_info.0));
-                       if let Some(commitment_tx) = self.onchain_tx_handler.get_fully_signed_holder_tx(&self.funding_redeemscript) {
-                               self.holder_tx_signed = true;
-                               let (mut new_outpoints, new_outputs, _) = self.broadcast_by_holder_state(&commitment_tx, &self.current_holder_commitment_tx);
-                               if !new_outputs.is_empty() {
-                                       watch_outputs.push((self.current_holder_commitment_tx.txid.clone(), new_outputs));
-                               }
-                               claimable_outpoints.append(&mut new_outpoints);
-                       }
-               }
-               if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
-                       for ev in events {
-                               match ev {
-                                       OnchainEvent::HTLCUpdate { htlc_update } => {
-                                               log_trace!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
-                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-                                                       payment_hash: htlc_update.1,
-                                                       payment_preimage: None,
-                                                       source: htlc_update.0,
-                                               }));
-                                       },
-                                       OnchainEvent::MaturingOutput { descriptor } => {
-                                               log_trace!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor));
-                                               self.pending_events.push(Event::SpendableOutputs {
-                                                       outputs: vec![descriptor]
-                                               });
-                                       }
-                               }
-                       }
-               }
-
-               self.onchain_tx_handler.block_connected(txn_matched, claimable_outpoints, height, &*broadcaster, &*fee_estimator, &*logger);
-
-               self.last_block_hash = block_hash.clone();
-               for &(ref txid, ref output_scripts) in watch_outputs.iter() {
-                       self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
-               }
-
-               watch_outputs
-       }
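
At the end of block_connected, each newly watched transaction's outputs are reduced to their script_pubkeys and stored so that get_outputs_to_watch() can later tell the chain source which spends to report. A self-contained sketch of that bookkeeping, with String and Vec<u8> standing in for Txid and Script:

    use std::collections::HashMap;

    fn record_watch_outputs(
        outputs_to_watch: &mut HashMap<String, Vec<Vec<u8>>>,
        watch_outputs: &[(String, Vec<(u64, Vec<u8>)>)], // (txid, [(value_sat, script_pubkey)])
    ) {
        for (txid, outputs) in watch_outputs {
            // Only the script_pubkeys matter for spotting spends; values are dropped.
            let scripts: Vec<Vec<u8>> = outputs.iter().map(|(_, spk)| spk.clone()).collect();
            outputs_to_watch.insert(txid.clone(), scripts);
        }
    }
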
-
-       fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, block_hash: &BlockHash, broadcaster: B, fee_estimator: F, logger: L)
-               where B::Target: BroadcasterInterface,
-                     F::Target: FeeEstimator,
-                     L::Target: Logger,
-       {
-               log_trace!(logger, "Block {} at height {} disconnected", block_hash, height);
-               if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
-                       // We may discard:
-                       // - an HTLC update, as the failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
-                       // - a maturing spendable output, as the transaction paying us has been disconnected
-               }
-
-               self.onchain_tx_handler.block_disconnected(height, broadcaster, fee_estimator, logger);
-
-               self.last_block_hash = block_hash.clone();
-       }
-
-       fn would_broadcast_at_height<L: Deref>(&self, height: u32, logger: &L) -> bool where L::Target: Logger {
-               // We need to consider all HTLCs which are:
-               //  * in any unrevoked counterparty commitment transaction, as they could broadcast said
-               //    transactions and we'd end up in a race, or
-               //  * are in our latest holder commitment transaction, as this is the thing we will
-               //    broadcast if we go on-chain.
-               // Note that we consider HTLCs which were below dust threshold here - while they don't
-               // strictly imply that we need to fail the channel, we need to go ahead and fail them back
-               // to the source, and if we don't fail the channel we will have to ensure that the next
-               // updates that peer sends us are update_fails, failing the channel if not. It's probably
-               // easier to just fail the channel as this case should be rare enough anyway.
-               macro_rules! scan_commitment {
-                       ($htlcs: expr, $holder_tx: expr) => {
-                               for ref htlc in $htlcs {
-                                       // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
-                                       // chain with enough room to claim the HTLC without our counterparty being able to
-                                       // time out the HTLC first.
-                                       // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
-                                       // concern is being able to claim the corresponding inbound HTLC (on another
-                                       // channel) before it expires. In fact, we don't even really care if our
-                                       // counterparty here claims such an outbound HTLC after it expired as long as we
-                                       // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
-                                       // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
-                                       // we give ourselves a few blocks of headroom after expiration before going
-                                       // on-chain for an expired HTLC.
-                                       // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
-                                       // from us until we've reached the point where we go on-chain with the
-                                       // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
-                                       // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-                                       //  aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
-                                       //      inbound_cltv == height + CLTV_CLAIM_BUFFER
-                                       //      outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
-                                       //      CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
-                                       //      LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
-                                       //  The final, above, condition is checked for statically in channelmanager
-                                       //  with CHECK_CLTV_EXPIRY_SANITY_2.
-                                       let htlc_outbound = $holder_tx == htlc.offered;
-                                       if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
-                                          (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
-                                               log_info!(logger, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound" }, htlc.cltv_expiry);
-                                               return true;
-                                       }
-                               }
-                       }
-               }
-
-               scan_commitment!(self.current_holder_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
-
-               if let Some(ref txid) = self.current_counterparty_commitment_txid {
-                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
-                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
-                       }
-               }
-               if let Some(ref txid) = self.prev_counterparty_commitment_txid {
-                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(txid) {
-                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
-                       }
-               }
-
-               false
-       }
-
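
    Taken together, the comment block above boils down to a small per-HTLC predicate. The following is a
    minimal, self-contained sketch of that check; the constant values are illustrative placeholders, not
    necessarily the ones the crate uses.

    // Illustrative constants; the real values are defined in channelmanager/channelmonitor.
    const CLTV_CLAIM_BUFFER: u32 = 6;
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;

    /// Returns true if we should go on-chain now to deal with this HTLC.
    fn needs_onchain_action(outbound: bool, cltv_expiry: u32, height: u32, have_preimage: bool) -> bool {
        if outbound {
            // Give the counterparty a grace period after expiry to fail the HTLC off-chain
            // before we force-close ourselves.
            cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height
        } else {
            // Inbound HTLCs we can claim must hit the chain CLTV_CLAIM_BUFFER blocks before expiry.
            have_preimage && cltv_expiry <= height + CLTV_CLAIM_BUFFER
        }
    }

    fn main() {
        assert!(needs_onchain_action(true, 100, 103, false));  // outbound, grace period exhausted
        assert!(!needs_onchain_action(true, 100, 102, false)); // outbound, still within grace period
        assert!(needs_onchain_action(false, 110, 104, true));  // inbound with preimage, nearing expiry
        assert!(!needs_onchain_action(false, 110, 103, true)); // inbound, still enough headroom
    }
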
-       /// Checks if any broadcast transaction resolves an HTLC output via a success or timeout on a holder
-       /// or counterparty commitment tx. If so, queues the source, the preimage (if found), and the payment_hash of the resolved HTLC.
-       fn is_resolving_htlc_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
-               'outer_loop: for input in &tx.input {
-                       let mut payment_data = None;
-                       let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
-                               || (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::AcceptedHTLC) && input.witness[1].len() == 33);
-                       let accepted_preimage_claim = input.witness.len() == 5 && HTLCType::scriptlen_to_htlctype(input.witness[4].len()) == Some(HTLCType::AcceptedHTLC);
-                       let offered_preimage_claim = input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC);
-
-                       macro_rules! log_claim {
-                               ($tx_info: expr, $holder_tx: expr, $htlc: expr, $source_avail: expr) => {
-                                       // We found the output in question, but aren't failing it backwards
-                                       // as we have no corresponding source and no valid counterparty commitment txid
-                                       // to try a weak source binding with a same-hash, same-value, still-valid offered HTLC.
-                                       // This implies it is either an inbound HTLC or an outbound HTLC on a revoked transaction.
-                                       let outbound_htlc = $holder_tx == $htlc.offered;
-                                       if ($holder_tx && revocation_sig_claim) ||
-                                                       (outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
-                                               log_error!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
-                                                       $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
-                                                       if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
-                                                       if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
-                                       } else {
-                                               log_info!(logger, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
-                                                       $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
-                                                       if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
-                                                       if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" });
-                                       }
-                               }
-                       }
-
-                       macro_rules! check_htlc_valid_counterparty {
-                               ($counterparty_txid: expr, $htlc_output: expr) => {
-                                       if let Some(txid) = $counterparty_txid {
-                                               for &(ref pending_htlc, ref pending_source) in self.counterparty_claimable_outpoints.get(&txid).unwrap() {
-                                                       if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
-                                                               if let &Some(ref source) = pending_source {
-                                                                       log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
-                                                                       payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
-                                                                       break;
-                                                               }
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-
-                       macro_rules! scan_commitment {
-                               ($htlcs: expr, $tx_info: expr, $holder_tx: expr) => {
-                                       for (ref htlc_output, source_option) in $htlcs {
-                                               if Some(input.previous_output.vout) == htlc_output.transaction_output_index {
-                                                       if let Some(ref source) = source_option {
-                                                               log_claim!($tx_info, $holder_tx, htlc_output, true);
-                                                               // We have a resolution of an HTLC either from one of our latest
-                                                               // holder commitment transactions or an unrevoked counterparty commitment
-                                                               // transaction. This implies we either learned a preimage, the HTLC
-                                                               // has timed out, or we screwed up. In any case, we should now
-                                                               // resolve the source HTLC with the original sender.
-                                                               payment_data = Some(((*source).clone(), htlc_output.payment_hash));
-                                                       } else if !$holder_tx {
-                                                               check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
-                                                               if payment_data.is_none() {
-                                                                       check_htlc_valid_counterparty!(self.prev_counterparty_commitment_txid, htlc_output);
-                                                               }
-                                                       }
-                                                       if payment_data.is_none() {
-                                                               log_claim!($tx_info, $holder_tx, htlc_output, false);
-                                                               continue 'outer_loop;
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-
-                       if input.previous_output.txid == self.current_holder_commitment_tx.txid {
-                               scan_commitment!(self.current_holder_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
-                                       "our latest holder commitment tx", true);
-                       }
-                       if let Some(ref prev_holder_signed_commitment_tx) = self.prev_holder_signed_commitment_tx {
-                               if input.previous_output.txid == prev_holder_signed_commitment_tx.txid {
-                                       scan_commitment!(prev_holder_signed_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
-                                               "our previous holder commitment tx", true);
-                               }
-                       }
-                       if let Some(ref htlc_outputs) = self.counterparty_claimable_outpoints.get(&input.previous_output.txid) {
-                               scan_commitment!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, (b.as_ref().clone()).map(|boxed| &**boxed))),
-                                       "counterparty commitment tx", false);
-                       }
-
-                       // Check that scan_commitment, above, decided there is some source worth relaying an
-                       // HTLC resolution backwards to and figure out whether we learned a preimage from it.
-                       if let Some((source, payment_hash)) = payment_data {
-                               let mut payment_preimage = PaymentPreimage([0; 32]);
-                               if accepted_preimage_claim {
-                                       if !self.pending_monitor_events.iter().any(
-                                               |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { upd.source == source } else { false }) {
-                                               payment_preimage.0.copy_from_slice(&input.witness[3]);
-                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-                                                       source,
-                                                       payment_preimage: Some(payment_preimage),
-                                                       payment_hash
-                                               }));
-                                       }
-                               } else if offered_preimage_claim {
-                                       if !self.pending_monitor_events.iter().any(
-                                               |update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
-                                                       upd.source == source
-                                               } else { false }) {
-                                               payment_preimage.0.copy_from_slice(&input.witness[1]);
-                                               self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-                                                       source,
-                                                       payment_preimage: Some(payment_preimage),
-                                                       payment_hash
-                                               }));
-                                       }
-                               } else {
-                                       log_info!(logger, "Failing HTLC with payment_hash {} timed out by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
-                                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-                                               hash_map::Entry::Occupied(mut entry) => {
-                                                       let e = entry.get_mut();
-                                                       e.retain(|ref event| {
-                                                               match **event {
-                                                                       OnchainEvent::HTLCUpdate { ref htlc_update } => {
-                                                                               return htlc_update.0 != source
-                                                                       },
-                                                                       _ => true
-                                                               }
-                                                       });
-                                                       e.push(OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)});
-                                               }
-                                               hash_map::Entry::Vacant(entry) => {
-                                                       entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)}]);
-                                               }
-                                       }
-                               }
-                       }
-               }
-       }
-
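
    The witness-shape checks at the top of is_resolving_htlc_output are the heart of this function: the
    number of witness items, plus which item carries a 33-byte key or the witness script, tells us how the
    HTLC output was spent. Below is a rough standalone sketch of that classification, with a stand-in
    HtlcType and placeholder script lengths (the real mapping lives in HTLCType::scriptlen_to_htlctype).

    #[derive(PartialEq)]
    enum HtlcType { Offered, Accepted }

    // Stand-in for HTLCType::scriptlen_to_htlctype; the lengths here are placeholders.
    fn script_len_to_htlc_type(len: usize) -> Option<HtlcType> {
        match len {
            133 => Some(HtlcType::Offered),  // placeholder length
            139 => Some(HtlcType::Accepted), // placeholder length
            _ => None,
        }
    }

    struct ClaimKind { revocation_sig: bool, accepted_preimage: bool, offered_preimage: bool }

    // Mirrors the checks above: a 3-item witness whose second item is a 33-byte key is a
    // revocation-key claim, a 5-item witness ending in an accepted-HTLC script is an
    // HTLC-success spend, and a 3-item witness ending in an offered-HTLC script is a
    // preimage claim by the counterparty.
    fn classify(witness: &[Vec<u8>]) -> ClaimKind {
        ClaimKind {
            revocation_sig: witness.len() == 3 && witness[1].len() == 33
                && script_len_to_htlc_type(witness[2].len()).is_some(),
            accepted_preimage: witness.len() == 5
                && script_len_to_htlc_type(witness[4].len()) == Some(HtlcType::Accepted),
            offered_preimage: witness.len() == 3
                && script_len_to_htlc_type(witness[2].len()) == Some(HtlcType::Offered),
        }
    }

    fn main() {
        // Witness of [sig, 33-byte key, script]: classified as a revocation-key claim.
        let w = vec![vec![0u8; 72], vec![0u8; 33], vec![0u8; 139]];
        assert!(classify(&w).revocation_sig);
    }
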
-       /// Checks if any broadcast transaction pays funds back to an address we can assume we own
-       fn is_paying_spendable_output<L: Deref>(&mut self, tx: &Transaction, height: u32, logger: &L) where L::Target: Logger {
-               let mut spendable_output = None;
-               for (i, outp) in tx.output.iter().enumerate() { // There is at most one spendable output for any channel tx, including ones generated by us
-                       if i > ::std::u16::MAX as usize {
-                               // While it is possible that an output exists on chain which is greater than the
-                               // 2^16th output in a given transaction, this is only possible if the output is not
-                               // in a lightning transaction and was instead placed there by some third party who
-                               // wishes to give us money for no reason.
-                               // Namely, any lightning transactions which we pre-sign will never have anywhere
-                               // near 2^16 outputs, both because such a transaction would need ~2^16 outputs whose
-                               // scripts are no longer than one byte in length and because it would be inherently
-                               // non-standard due to its size.
-                               // Thus, it is completely safe to ignore such outputs, and while it may result in
-                               // us ignoring non-lightning funds sent to us, that is only possible if someone fills
-                               // nearly a full block with garbage just to hit this case.
-                               continue;
-                       }
-                       if outp.script_pubkey == self.destination_script {
-                               spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
-                                       output: outp.clone(),
-                               });
-                               break;
-                       } else if let Some(ref broadcasted_holder_revokable_script) = self.broadcasted_holder_revokable_script {
-                               if broadcasted_holder_revokable_script.0 == outp.script_pubkey {
-                                       spendable_output = Some(SpendableOutputDescriptor::DynamicOutputP2WSH {
-                                               outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
-                                               per_commitment_point: broadcasted_holder_revokable_script.1,
-                                               to_self_delay: self.on_holder_tx_csv,
-                                               output: outp.clone(),
-                                               key_derivation_params: self.keys.key_derivation_params(),
-                                               revocation_pubkey: broadcasted_holder_revokable_script.2.clone(),
-                                       });
-                                       break;
-                               }
-                       } else if self.counterparty_payment_script == outp.script_pubkey {
-                               spendable_output = Some(SpendableOutputDescriptor::StaticOutputCounterpartyPayment {
-                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
-                                       output: outp.clone(),
-                                       key_derivation_params: self.keys.key_derivation_params(),
-                               });
-                               break;
-                       } else if outp.script_pubkey == self.shutdown_script {
-                               spendable_output = Some(SpendableOutputDescriptor::StaticOutput {
-                                       outpoint: OutPoint { txid: tx.txid(), index: i as u16 },
-                                       output: outp.clone(),
-                               });
-                       }
-               }
-               if let Some(spendable_output) = spendable_output {
-                       log_trace!(logger, "Maturing {} until {}", log_spendable!(spendable_output), height + ANTI_REORG_DELAY - 1);
-                       match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
-                               hash_map::Entry::Occupied(mut entry) => {
-                                       let e = entry.get_mut();
-                                       e.push(OnchainEvent::MaturingOutput { descriptor: spendable_output });
-                               }
-                               hash_map::Entry::Vacant(entry) => {
-                                       entry.insert(vec![OnchainEvent::MaturingOutput { descriptor: spendable_output }]);
-                               }
-                       }
-               }
-       }
-}
-
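
    Both of the scans above use the same deferral pattern: anything discovered on-chain only becomes an
    event once the discovering transaction has ANTI_REORG_DELAY confirmations, which is what the
    onchain_events_waiting_threshold_conf map keyed by target height implements. A simplified,
    self-contained sketch of that pattern follows (the constant value and event type are illustrative).

    use std::collections::HashMap;

    // Illustrative value; the crate defines its own ANTI_REORG_DELAY.
    const ANTI_REORG_DELAY: u32 = 6;

    enum PendingEvent { MaturingOutput(&'static str) }

    #[derive(Default)]
    struct EventQueue { waiting: HashMap<u32, Vec<PendingEvent>> }

    impl EventQueue {
        // Queue an event to fire once the triggering tx has ANTI_REORG_DELAY confirmations.
        fn defer(&mut self, seen_at_height: u32, ev: PendingEvent) {
            self.waiting.entry(seen_at_height + ANTI_REORG_DELAY - 1).or_insert_with(Vec::new).push(ev);
        }

        // On each connected block, drain whatever has matured at this height.
        fn on_block_connected(&mut self, height: u32) -> Vec<PendingEvent> {
            self.waiting.remove(&height).unwrap_or_default()
        }
    }

    fn main() {
        let mut q = EventQueue::default();
        q.defer(100, PendingEvent::MaturingOutput("to_remote output"));
        assert!(q.on_block_connected(104).is_empty());
        assert_eq!(q.on_block_connected(105).len(), 1); // matures at 100 + ANTI_REORG_DELAY - 1
    }
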
-const MAX_ALLOC_SIZE: usize = 64*1024;
-
-impl<ChanSigner: ChannelKeys + Readable> Readable for (BlockHash, ChannelMonitor<ChanSigner>) {
-       fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
-               macro_rules! unwrap_obj {
-                       ($key: expr) => {
-                               match $key {
-                                       Ok(res) => res,
-                                       Err(_) => return Err(DecodeError::InvalidValue),
-                               }
-                       }
-               }
-
-               let _ver: u8 = Readable::read(reader)?;
-               let min_ver: u8 = Readable::read(reader)?;
-               if min_ver > SERIALIZATION_VERSION {
-                       return Err(DecodeError::UnknownVersion);
-               }
-
-               let latest_update_id: u64 = Readable::read(reader)?;
-               let commitment_transaction_number_obscure_factor = <U48 as Readable>::read(reader)?.0;
-
-               let destination_script = Readable::read(reader)?;
-               let broadcasted_holder_revokable_script = match <u8 as Readable>::read(reader)? {
-                       0 => {
-                               let revokable_address = Readable::read(reader)?;
-                               let per_commitment_point = Readable::read(reader)?;
-                               let revokable_script = Readable::read(reader)?;
-                               Some((revokable_address, per_commitment_point, revokable_script))
-                       },
-                       1 => { None },
-                       _ => return Err(DecodeError::InvalidValue),
-               };
-               let counterparty_payment_script = Readable::read(reader)?;
-               let shutdown_script = Readable::read(reader)?;
-
-               let keys = Readable::read(reader)?;
-               // Technically this can fail, causing a serialization round-trip to fail, but only for
-               // barely-initialized ChannelMonitors that we can't do anything with anyway.
-               let outpoint = OutPoint {
-                       txid: Readable::read(reader)?,
-                       index: Readable::read(reader)?,
-               };
-               let funding_info = (outpoint, Readable::read(reader)?);
-               let current_counterparty_commitment_txid = Readable::read(reader)?;
-               let prev_counterparty_commitment_txid = Readable::read(reader)?;
-
-               let counterparty_tx_cache = Readable::read(reader)?;
-               let funding_redeemscript = Readable::read(reader)?;
-               let channel_value_satoshis = Readable::read(reader)?;
-
-               let their_cur_revocation_points = {
-                       let first_idx = <U48 as Readable>::read(reader)?.0;
-                       if first_idx == 0 {
-                               None
-                       } else {
-                               let first_point = Readable::read(reader)?;
-                               let second_point_slice: [u8; 33] = Readable::read(reader)?;
-                               if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 {
-                                       Some((first_idx, first_point, None))
-                               } else {
-                                       Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice)))))
-                               }
-                       }
-               };
-
-               let on_holder_tx_csv: u16 = Readable::read(reader)?;
-
-               let commitment_secrets = Readable::read(reader)?;
-
-               macro_rules! read_htlc_in_commitment {
-                       () => {
-                               {
-                                       let offered: bool = Readable::read(reader)?;
-                                       let amount_msat: u64 = Readable::read(reader)?;
-                                       let cltv_expiry: u32 = Readable::read(reader)?;
-                                       let payment_hash: PaymentHash = Readable::read(reader)?;
-                                       let transaction_output_index: Option<u32> = Readable::read(reader)?;
-
-                                       HTLCOutputInCommitment {
-                                               offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index
-                                       }
-                               }
-                       }
-               }
-
-               let counterparty_claimable_outpoints_len: u64 = Readable::read(reader)?;
-               let mut counterparty_claimable_outpoints = HashMap::with_capacity(cmp::min(counterparty_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
-               for _ in 0..counterparty_claimable_outpoints_len {
-                       let txid: Txid = Readable::read(reader)?;
-                       let htlcs_count: u64 = Readable::read(reader)?;
-                       let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
-                       for _ in 0..htlcs_count {
-                               htlcs.push((read_htlc_in_commitment!(), <Option<HTLCSource> as Readable>::read(reader)?.map(|o: HTLCSource| Box::new(o))));
-                       }
-                       if let Some(_) = counterparty_claimable_outpoints.insert(txid, htlcs) {
-                               return Err(DecodeError::InvalidValue);
-                       }
-               }
-
-               let counterparty_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
-               let mut counterparty_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(counterparty_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
-               for _ in 0..counterparty_commitment_txn_on_chain_len {
-                       let txid: Txid = Readable::read(reader)?;
-                       let commitment_number = <U48 as Readable>::read(reader)?.0;
-                       let outputs_count = <u64 as Readable>::read(reader)?;
-                       let mut outputs = Vec::with_capacity(cmp::min(outputs_count as usize, MAX_ALLOC_SIZE / 8));
-                       for _ in 0..outputs_count {
-                               outputs.push(Readable::read(reader)?);
-                       }
-                       if let Some(_) = counterparty_commitment_txn_on_chain.insert(txid, (commitment_number, outputs)) {
-                               return Err(DecodeError::InvalidValue);
-                       }
-               }
-
-               let counterparty_hash_commitment_number_len: u64 = Readable::read(reader)?;
-               let mut counterparty_hash_commitment_number = HashMap::with_capacity(cmp::min(counterparty_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
-               for _ in 0..counterparty_hash_commitment_number_len {
-                       let payment_hash: PaymentHash = Readable::read(reader)?;
-                       let commitment_number = <U48 as Readable>::read(reader)?.0;
-                       if let Some(_) = counterparty_hash_commitment_number.insert(payment_hash, commitment_number) {
-                               return Err(DecodeError::InvalidValue);
-                       }
-               }
-
-               macro_rules! read_holder_tx {
-                       () => {
-                               {
-                                       let txid = Readable::read(reader)?;
-                                       let revocation_key = Readable::read(reader)?;
-                                       let a_htlc_key = Readable::read(reader)?;
-                                       let b_htlc_key = Readable::read(reader)?;
-                                       let delayed_payment_key = Readable::read(reader)?;
-                                       let per_commitment_point = Readable::read(reader)?;
-                                       let feerate_per_kw: u32 = Readable::read(reader)?;
-
-                                       let htlcs_len: u64 = Readable::read(reader)?;
-                                       let mut htlcs = Vec::with_capacity(cmp::min(htlcs_len as usize, MAX_ALLOC_SIZE / 128));
-                                       for _ in 0..htlcs_len {
-                                               let htlc = read_htlc_in_commitment!();
-                                               let sigs = match <u8 as Readable>::read(reader)? {
-                                                       0 => None,
-                                                       1 => Some(Readable::read(reader)?),
-                                                       _ => return Err(DecodeError::InvalidValue),
-                                               };
-                                               htlcs.push((htlc, sigs, Readable::read(reader)?));
-                                       }
-
-                                       HolderSignedTx {
-                                               txid,
-                                               revocation_key, a_htlc_key, b_htlc_key, delayed_payment_key, per_commitment_point, feerate_per_kw,
-                                               htlc_outputs: htlcs
-                                       }
-                               }
-                       }
-               }
-
-               let prev_holder_signed_commitment_tx = match <u8 as Readable>::read(reader)? {
-                       0 => None,
-                       1 => {
-                               Some(read_holder_tx!())
-                       },
-                       _ => return Err(DecodeError::InvalidValue),
-               };
-               let current_holder_commitment_tx = read_holder_tx!();
-
-               let current_counterparty_commitment_number = <U48 as Readable>::read(reader)?.0;
-               let current_holder_commitment_number = <U48 as Readable>::read(reader)?.0;
-
-               let payment_preimages_len: u64 = Readable::read(reader)?;
-               let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
-               for _ in 0..payment_preimages_len {
-                       let preimage: PaymentPreimage = Readable::read(reader)?;
-                       let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
-                       if let Some(_) = payment_preimages.insert(hash, preimage) {
-                               return Err(DecodeError::InvalidValue);
-                       }
-               }
-
-               let pending_monitor_events_len: u64 = Readable::read(reader)?;
-               let mut pending_monitor_events = Vec::with_capacity(cmp::min(pending_monitor_events_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
-               for _ in 0..pending_monitor_events_len {
-                       let ev = match <u8 as Readable>::read(reader)? {
-                               0 => MonitorEvent::HTLCEvent(Readable::read(reader)?),
-                               1 => MonitorEvent::CommitmentTxBroadcasted(funding_info.0),
-                               _ => return Err(DecodeError::InvalidValue)
-                       };
-                       pending_monitor_events.push(ev);
-               }
-
-               let pending_events_len: u64 = Readable::read(reader)?;
-               let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Event>()));
-               for _ in 0..pending_events_len {
-                       if let Some(event) = MaybeReadable::read(reader)? {
-                               pending_events.push(event);
-                       }
-               }
-
-               let last_block_hash: BlockHash = Readable::read(reader)?;
-
-               let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
-               let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
-               for _ in 0..waiting_threshold_conf_len {
-                       let height_target = Readable::read(reader)?;
-                       let events_len: u64 = Readable::read(reader)?;
-                       let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
-                       for _ in 0..events_len {
-                               let ev = match <u8 as Readable>::read(reader)? {
-                                       0 => {
-                                               let htlc_source = Readable::read(reader)?;
-                                               let hash = Readable::read(reader)?;
-                                               OnchainEvent::HTLCUpdate {
-                                                       htlc_update: (htlc_source, hash)
-                                               }
-                                       },
-                                       1 => {
-                                               let descriptor = Readable::read(reader)?;
-                                               OnchainEvent::MaturingOutput {
-                                                       descriptor
-                                               }
-                                       },
-                                       _ => return Err(DecodeError::InvalidValue),
-                               };
-                               events.push(ev);
-                       }
-                       onchain_events_waiting_threshold_conf.insert(height_target, events);
-               }
-
-               let outputs_to_watch_len: u64 = Readable::read(reader)?;
-               let mut outputs_to_watch = HashMap::with_capacity(cmp::min(outputs_to_watch_len as usize, MAX_ALLOC_SIZE / (mem::size_of::<Txid>() + mem::size_of::<Vec<Script>>())));
-               for _ in 0..outputs_to_watch_len {
-                       let txid = Readable::read(reader)?;
-                       let outputs_len: u64 = Readable::read(reader)?;
-                       let mut outputs = Vec::with_capacity(cmp::min(outputs_len as usize, MAX_ALLOC_SIZE / mem::size_of::<Script>()));
-                       for _ in 0..outputs_len {
-                               outputs.push(Readable::read(reader)?);
-                       }
-                       if let Some(_) = outputs_to_watch.insert(txid, outputs) {
-                               return Err(DecodeError::InvalidValue);
-                       }
-               }
-               let onchain_tx_handler = Readable::read(reader)?;
-
-               let lockdown_from_offchain = Readable::read(reader)?;
-               let holder_tx_signed = Readable::read(reader)?;
-
-               Ok((last_block_hash.clone(), ChannelMonitor {
-                       latest_update_id,
-                       commitment_transaction_number_obscure_factor,
-
-                       destination_script,
-                       broadcasted_holder_revokable_script,
-                       counterparty_payment_script,
-                       shutdown_script,
-
-                       keys,
-                       funding_info,
-                       current_counterparty_commitment_txid,
-                       prev_counterparty_commitment_txid,
-
-                       counterparty_tx_cache,
-                       funding_redeemscript,
-                       channel_value_satoshis,
-                       their_cur_revocation_points,
-
-                       on_holder_tx_csv,
-
-                       commitment_secrets,
-                       counterparty_claimable_outpoints,
-                       counterparty_commitment_txn_on_chain,
-                       counterparty_hash_commitment_number,
-
-                       prev_holder_signed_commitment_tx,
-                       current_holder_commitment_tx,
-                       current_counterparty_commitment_number,
-                       current_holder_commitment_number,
-
-                       payment_preimages,
-                       pending_monitor_events,
-                       pending_events,
-
-                       onchain_events_waiting_threshold_conf,
-                       outputs_to_watch,
-
-                       onchain_tx_handler,
-
-                       lockdown_from_offchain,
-                       holder_tx_signed,
-
-                       last_block_hash,
-                       secp_ctx: Secp256k1::new(),
-               }))
-       }
-}
-
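
    One detail of the deserializer above worth calling out is the MAX_ALLOC_SIZE pattern: every
    length-prefixed collection caps its pre-allocation, so a corrupt or hostile length prefix costs reads,
    not memory. Here is a stripped-down sketch of the same idea; the big-endian u64 prefix and 32-byte
    entries are purely illustrative, not the crate's wire format.

    use std::cmp;
    use std::io::{Cursor, Read};

    const MAX_ALLOC_SIZE: usize = 64 * 1024;

    // Read a u64 count followed by that many 32-byte entries, never pre-allocating more
    // than MAX_ALLOC_SIZE up front.
    fn read_hashes<R: Read>(reader: &mut R) -> Result<Vec<[u8; 32]>, std::io::Error> {
        let mut len_bytes = [0u8; 8];
        reader.read_exact(&mut len_bytes)?;
        let len = u64::from_be_bytes(len_bytes);

        let mut out = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / 32));
        for _ in 0..len {
            let mut entry = [0u8; 32];
            reader.read_exact(&mut entry)?;
            out.push(entry);
        }
        Ok(out)
    }

    fn main() -> Result<(), std::io::Error> {
        let mut data = 2u64.to_be_bytes().to_vec();
        data.extend_from_slice(&[0u8; 64]);
        assert_eq!(read_hashes(&mut Cursor::new(data))?.len(), 2);
        Ok(())
    }
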
-#[cfg(test)]
-mod tests {
-       use bitcoin::blockdata::script::{Script, Builder};
-       use bitcoin::blockdata::opcodes;
-       use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
-       use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
-       use bitcoin::util::bip143;
-       use bitcoin::hashes::Hash;
-       use bitcoin::hashes::sha256::Hash as Sha256;
-       use bitcoin::hashes::hex::FromHex;
-       use bitcoin::hash_types::Txid;
-       use hex;
-       use chain::transaction::OutPoint;
-       use ln::channelmanager::{PaymentPreimage, PaymentHash};
-       use ln::channelmonitor::ChannelMonitor;
-       use ln::onchaintx::{OnchainTxHandler, InputDescriptors};
-       use ln::chan_utils;
-       use ln::chan_utils::{HTLCOutputInCommitment, HolderCommitmentTransaction};
-       use util::test_utils::TestLogger;
-       use bitcoin::secp256k1::key::{SecretKey,PublicKey};
-       use bitcoin::secp256k1::Secp256k1;
-       use std::sync::Arc;
-       use chain::keysinterface::InMemoryChannelKeys;
-
-       #[test]
-       fn test_prune_preimages() {
-               let secp_ctx = Secp256k1::new();
-               let logger = Arc::new(TestLogger::new());
-
-               let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
-               let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
-
-               let mut preimages = Vec::new();
-               {
-                       for i in 0..20 {
-                               let preimage = PaymentPreimage([i; 32]);
-                               let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
-                               preimages.push((preimage, hash));
-                       }
-               }
-
-               macro_rules! preimages_slice_to_htlc_outputs {
-                       ($preimages_slice: expr) => {
-                               {
-                                       let mut res = Vec::new();
-                                       for (idx, preimage) in $preimages_slice.iter().enumerate() {
-                                               res.push((HTLCOutputInCommitment {
-                                                       offered: true,
-                                                       amount_msat: 0,
-                                                       cltv_expiry: 0,
-                                                       payment_hash: preimage.1.clone(),
-                                                       transaction_output_index: Some(idx as u32),
-                                               }, None));
-                                       }
-                                       res
-                               }
-                       }
-               }
-               macro_rules! preimages_to_holder_htlcs {
-                       ($preimages_slice: expr) => {
-                               {
-                                       let mut inp = preimages_slice_to_htlc_outputs!($preimages_slice);
-                                       let res: Vec<_> = inp.drain(..).map(|e| { (e.0, None, e.1) }).collect();
-                                       res
-                               }
-                       }
-               }
-
-               macro_rules! test_preimages_exist {
-                       ($preimages_slice: expr, $monitor: expr) => {
-                               for preimage in $preimages_slice {
-                                       assert!($monitor.payment_preimages.contains_key(&preimage.1));
-                               }
-                       }
-               }
-
-               let keys = InMemoryChannelKeys::new(
-                       &secp_ctx,
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       [41; 32],
-                       0,
-                       (0, 0)
-               );
-
-               // Prune with one old state and a holder commitment tx holding a few HTLCs which overlap
-               // with the old state.
-               let mut monitor = ChannelMonitor::new(keys,
-                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0, &Script::new(),
-                       (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
-                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
-                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()),
-                       10, Script::new(), 46, 0, HolderCommitmentTransaction::dummy());
-
-               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..10])).unwrap();
-               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger);
-               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key, &logger);
-               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key, &logger);
-               monitor.provide_latest_counterparty_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key, &logger);
-               for &(ref preimage, ref hash) in preimages.iter() {
-                       monitor.provide_payment_preimage(hash, preimage);
-               }
-
-               // Now provide a secret, pruning preimages 10-15
-               let mut secret = [0; 32];
-               secret[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-               monitor.provide_secret(281474976710655, secret.clone()).unwrap();
-               assert_eq!(monitor.payment_preimages.len(), 15);
-               test_preimages_exist!(&preimages[0..10], monitor);
-               test_preimages_exist!(&preimages[15..20], monitor);
-
-               // Now provide a further secret, pruning preimages 15-17
-               secret[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-               monitor.provide_secret(281474976710654, secret.clone()).unwrap();
-               assert_eq!(monitor.payment_preimages.len(), 13);
-               test_preimages_exist!(&preimages[0..10], monitor);
-               test_preimages_exist!(&preimages[17..20], monitor);
-
-               // Now update holder commitment tx info, pruning only element 18 as we still care about the
-               // previous commitment tx's preimages too
-               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..5])).unwrap();
-               secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-               monitor.provide_secret(281474976710653, secret.clone()).unwrap();
-               assert_eq!(monitor.payment_preimages.len(), 12);
-               test_preimages_exist!(&preimages[0..10], monitor);
-               test_preimages_exist!(&preimages[18..20], monitor);
-
-               // But if we do it again, we'll prune 5-10
-               monitor.provide_latest_holder_commitment_tx_info(HolderCommitmentTransaction::dummy(), preimages_to_holder_htlcs!(preimages[0..3])).unwrap();
-               secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-               monitor.provide_secret(281474976710652, secret.clone()).unwrap();
-               assert_eq!(monitor.payment_preimages.len(), 5);
-               test_preimages_exist!(&preimages[0..5], monitor);
-       }
-
-       #[test]
-       fn test_claim_txn_weight_computation() {
-               // We test claim tx weight against the expected weight rather than the actual one, to
-               // avoid variance from signature lengths and time-lock delays.
-
-               let secp_ctx = Secp256k1::new();
-               let privkey = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
-               let pubkey = PublicKey::from_secret_key(&secp_ctx, &privkey);
-               let mut sum_actual_sigs = 0;
-
-               macro_rules! sign_input {
-                       ($sighash_parts: expr, $idx: expr, $amount: expr, $input_type: expr, $sum_actual_sigs: expr) => {
-                               let htlc = HTLCOutputInCommitment {
-                                       offered: *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::OfferedHTLC,
-                                       amount_msat: 0,
-                                       cltv_expiry: 2 << 16,
-                                       payment_hash: PaymentHash([1; 32]),
-                                       transaction_output_index: Some($idx as u32),
-                               };
-                               let redeem_script = if *$input_type == InputDescriptors::RevokedOutput { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
-                               let sighash = hash_to_message!(&$sighash_parts.signature_hash($idx, &redeem_script, $amount, SigHashType::All)[..]);
-                               let sig = secp_ctx.sign(&sighash, &privkey);
-                               $sighash_parts.access_witness($idx).push(sig.serialize_der().to_vec());
-                               $sighash_parts.access_witness($idx)[0].push(SigHashType::All as u8);
-                               sum_actual_sigs += $sighash_parts.access_witness($idx)[0].len();
-                               if *$input_type == InputDescriptors::RevokedOutput {
-                                       $sighash_parts.access_witness($idx).push(vec!(1));
-                               } else if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::RevokedReceivedHTLC {
-                                       $sighash_parts.access_witness($idx).push(pubkey.clone().serialize().to_vec());
-                               } else if *$input_type == InputDescriptors::ReceivedHTLC {
-                                       $sighash_parts.access_witness($idx).push(vec![0]);
-                               } else {
-                                       $sighash_parts.access_witness($idx).push(PaymentPreimage([1; 32]).0.to_vec());
-                               }
-                               $sighash_parts.access_witness($idx).push(redeem_script.into_bytes());
-                               println!("witness[0] {}", $sighash_parts.access_witness($idx)[0].len());
-                               println!("witness[1] {}", $sighash_parts.access_witness($idx)[1].len());
-                               println!("witness[2] {}", $sighash_parts.access_witness($idx)[2].len());
-                       }
-               }
-
-               let script_pubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script();
-               let txid = Txid::from_hex("56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d").unwrap();
-
-               // Justice tx with 1 to_holder output, 2 revoked offered HTLCs, 1 revoked received HTLC
-               let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
-               for i in 0..4 {
-                       claim_tx.input.push(TxIn {
-                               previous_output: BitcoinOutPoint {
-                                       txid,
-                                       vout: i,
-                               },
-                               script_sig: Script::new(),
-                               sequence: 0xfffffffd,
-                               witness: Vec::new(),
-                       });
-               }
-               claim_tx.output.push(TxOut {
-                       script_pubkey: script_pubkey.clone(),
-                       value: 0,
-               });
-               let base_weight = claim_tx.get_weight();
-               let inputs_des = vec![InputDescriptors::RevokedOutput, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedReceivedHTLC];
-               {
-                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
-                       for (idx, inp) in inputs_des.iter().enumerate() {
-                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
-                       }
-               }
-               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
-
-               // Claim tx with 1 offered HTLC, 3 received HTLCs
-               claim_tx.input.clear();
-               sum_actual_sigs = 0;
-               for i in 0..4 {
-                       claim_tx.input.push(TxIn {
-                               previous_output: BitcoinOutPoint {
-                                       txid,
-                                       vout: i,
-                               },
-                               script_sig: Script::new(),
-                               sequence: 0xfffffffd,
-                               witness: Vec::new(),
-                       });
-               }
-               let base_weight = claim_tx.get_weight();
-               let inputs_des = vec![InputDescriptors::OfferedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC];
-               {
-                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
-                       for (idx, inp) in inputs_des.iter().enumerate() {
-                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
-                       }
-               }
-               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
-
-               // Justice tx with 1 revoked HTLC-Success tx output
-               claim_tx.input.clear();
-               sum_actual_sigs = 0;
-               claim_tx.input.push(TxIn {
-                       previous_output: BitcoinOutPoint {
-                               txid,
-                               vout: 0,
-                       },
-                       script_sig: Script::new(),
-                       sequence: 0xfffffffd,
-                       witness: Vec::new(),
-               });
-               let base_weight = claim_tx.get_weight();
-               let inputs_des = vec![InputDescriptors::RevokedOutput];
-               {
-                       let mut sighash_parts = bip143::SigHashCache::new(&mut claim_tx);
-                       for (idx, inp) in inputs_des.iter().enumerate() {
-                               sign_input!(sighash_parts, idx, 0, inp, sum_actual_sigs);
-                       }
-               }
-               assert_eq!(base_weight + OnchainTxHandler::<InMemoryChannelKeys>::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
-       }
-
-       // Further testing is done in the ChannelManager integration tests.
-}
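
    The assertions in the weight test rely on the fact that witness bytes cost one weight unit each, so an
    estimate that assumes maximum-length (73-byte) signatures exceeds the actually-signed transaction's
    weight by exactly the signature-length slack. A tiny arithmetic sketch of that identity, with made-up
    numbers:

    fn main() {
        // estimated_weight (every sig assumed 73 bytes)
        //   == actual_signed_weight + sum(73 - actual_sig_len) over all inputs
        let non_sig_witness_weight = 300usize;   // placeholder weight of non-signature witness data
        let actual_sig_lens = [71usize, 72, 73]; // three inputs with real-world sig lengths
        let estimated = 3 * 73 + non_sig_witness_weight;
        let actual: usize = actual_sig_lens.iter().sum::<usize>() + non_sig_witness_weight;
        let slack: usize = actual_sig_lens.iter().map(|l| 73 - l).sum();
        assert_eq!(estimated, actual + slack);
    }
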
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index bcd7fe2dd5dbb3e4e299b506a156ea9509749652..4c3db606d9f96b56c773898268aab74279e9390e 100644 (file)
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
 //! A bunch of useful utilities for building networks of nodes and exchanging messages between
 //! nodes for functional tests.
 
-use chain::chaininterface;
+use chain::Watch;
+use chain::channelmonitor::ChannelMonitor;
 use chain::transaction::OutPoint;
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
-use ln::channelmonitor::{ChannelMonitor, ManyChannelMonitor};
 use routing::router::{Route, get_route};
 use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
 use ln::features::InitFeatures;
@@ -21,13 +21,13 @@ use ln::msgs;
 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::test_utils;
-use util::test_utils::TestChannelMonitor;
+use util::test_utils::TestChainMonitor;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
 use util::config::UserConfig;
 use util::ser::{ReadableArgs, Writeable, Readable};
 
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::network::constants::Network;
 
@@ -44,51 +44,76 @@ use std::mem;
 use std::collections::HashMap;
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 100;
-pub fn confirm_transaction<'a, 'b: 'a>(notifier: &'a chaininterface::BlockNotifierRef<'b, &chaininterface::ChainWatchInterfaceUtil>, chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: i32) {
-       assert!(chain.does_match_tx(tx));
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       notifier.block_connected_checked(&header, 1, &[tx; 1], &[chan_id as usize; 1]);
+
+pub fn confirm_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+       let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+       let dummy_tx_count = tx.version as usize;
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![dummy_tx; dummy_tx_count],
+       };
+       block.txdata.push(tx.clone());
+       connect_block(node, &block, 1);
        for i in 2..CHAN_CONFIRM_DEPTH {
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               notifier.block_connected_checked(&header, i, &vec![], &[0; 0]);
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(node, &block, i);
        }
 }
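
    The dummy-transaction padding in the new confirm_transaction deserves a note: the tests encode the
    desired in-block index of the funding transaction in its version field, so the block built here places
    it at a predictable position in txdata. A hedged sketch of the same construction as a standalone
    helper follows; block_with_tx_at_index is hypothetical, and the header fields simply mirror the test
    code above.

    use bitcoin::blockdata::block::{Block, BlockHeader};
    use bitcoin::blockdata::transaction::Transaction;
    use bitcoin::hash_types::BlockHash;

    // Build a block whose interesting transaction sits at `index` by prepending empty
    // dummy transactions, mirroring what confirm_transaction does with tx.version.
    fn block_with_tx_at_index(tx: &Transaction, index: usize, prev_blockhash: BlockHash) -> Block {
        let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
        let mut block = Block {
            header: BlockHeader {
                version: 0x20000000, prev_blockhash, merkle_root: Default::default(),
                time: 42, bits: 42, nonce: 42,
            },
            txdata: vec![dummy_tx; index],
        };
        block.txdata.push(tx.clone());
        block
    }
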
 
-pub fn connect_blocks<'a, 'b>(notifier: &'a chaininterface::BlockNotifierRef<'b, &chaininterface::ChainWatchInterfaceUtil>, depth: u32, height: u32, parent: bool, prev_blockhash: BlockHash) -> BlockHash {
-       let mut header = BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       notifier.block_connected_checked(&header, height + 1, &Vec::new(), &Vec::new());
+pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32, height: u32, parent: bool, prev_blockhash: BlockHash) -> BlockHash {
+       let mut block = Block {
+               header: BlockHeader { version: 0x2000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
+       connect_block(node, &block, height + 1);
        for i in 2..depth + 1 {
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               notifier.block_connected_checked(&header, height + i, &Vec::new(), &Vec::new());
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(node, &block, height + i);
        }
-       header.block_hash()
+       block.header.block_hash()
+}
+
+pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, height: u32) {
+       let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+       while node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height) {}
+       node.node.block_connected(&block.header, &txdata, height);
+}
+
+pub fn disconnect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, header: &BlockHeader, height: u32) {
+       node.chain_monitor.chain_monitor.block_disconnected(header, height);
+       node.node.block_disconnected(header);
 }
 
 pub struct TestChanMonCfg {
        pub tx_broadcaster: test_utils::TestBroadcaster,
        pub fee_estimator: test_utils::TestFeeEstimator,
-       pub chain_monitor: chaininterface::ChainWatchInterfaceUtil,
+       pub chain_source: test_utils::TestChainSource,
        pub logger: test_utils::TestLogger,
 }
 
 pub struct NodeCfg<'a> {
-       pub chain_monitor: &'a chaininterface::ChainWatchInterfaceUtil,
+       pub chain_source: &'a test_utils::TestChainSource,
        pub tx_broadcaster: &'a test_utils::TestBroadcaster,
        pub fee_estimator: &'a test_utils::TestFeeEstimator,
-       pub chan_monitor: test_utils::TestChannelMonitor<'a>,
+       pub chain_monitor: test_utils::TestChainMonitor<'a>,
        pub keys_manager: test_utils::TestKeysInterface,
        pub logger: &'a test_utils::TestLogger,
        pub node_seed: [u8; 32],
 }
 
 pub struct Node<'a, 'b: 'a, 'c: 'b> {
-       pub block_notifier: chaininterface::BlockNotifierRef<'a, &'c chaininterface::ChainWatchInterfaceUtil>,
-       pub chain_monitor: &'c chaininterface::ChainWatchInterfaceUtil,
+       pub chain_source: &'c test_utils::TestChainSource,
        pub tx_broadcaster: &'c test_utils::TestBroadcaster,
-       pub chan_monitor: &'b test_utils::TestChannelMonitor<'c>,
+       pub chain_monitor: &'b test_utils::TestChainMonitor<'c>,
        pub keys_manager: &'b test_utils::TestKeysInterface,
-       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
-       pub net_graph_msg_handler: NetGraphMsgHandler<&'c chaininterface::ChainWatchInterfaceUtil, &'c test_utils::TestLogger>,
+       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>,
+       pub net_graph_msg_handler: NetGraphMsgHandler<&'c test_utils::TestChainSource, &'c test_utils::TestLogger>,
        pub node_seed: [u8; 32],
        pub network_payment_count: Rc<RefCell<u8>>,
        pub network_chan_count: Rc<RefCell<u32>>,
@@ -101,7 +126,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        // Check that we processed all pending events
                        assert!(self.node.get_and_clear_pending_msg_events().is_empty());
                        assert!(self.node.get_and_clear_pending_events().is_empty());
-                       assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
+                       assert!(self.chain_monitor.added_monitors.lock().unwrap().is_empty());
 
                        // Check that if we serialize the Router, we can deserialize it again.
                        {
@@ -111,7 +136,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                                let network_graph_deser = <NetworkGraph>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
                                assert!(network_graph_deser == *self.net_graph_msg_handler.network_graph.read().unwrap());
                                let net_graph_msg_handler = NetGraphMsgHandler::from_net_graph(
-                                       self.chain_monitor, self.logger, network_graph_deser
+                                       Some(self.chain_source), self.logger, network_graph_deser
                                );
                                let mut chan_progress = 0;
                                loop {
@@ -141,7 +166,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
                        let mut deserialized_monitors = Vec::new();
                        {
-                               let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                               let old_monitors = self.chain_monitor.chain_monitor.monitors.lock().unwrap();
                                for (_, old_monitor) in old_monitors.iter() {
                                        let mut w = test_utils::TestVecWriter(Vec::new());
                                        old_monitor.write_for_disk(&mut w).unwrap();
@@ -161,27 +186,26 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
-                               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+                               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
                                        default_config: UserConfig::default(),
                                        keys_manager: self.keys_manager,
                                        fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 },
-                                       monitor: self.chan_monitor,
+                                       chain_monitor: self.chain_monitor,
                                        tx_broadcaster: self.tx_broadcaster.clone(),
                                        logger: &test_utils::TestLogger::new(),
                                        channel_monitors,
                                }).unwrap();
                        }
 
-                       let chain_watch = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
-                       let channel_monitor = test_utils::TestChannelMonitor::new(&chain_watch, self.tx_broadcaster.clone(), &self.logger, &feeest);
+                       let chain_source = test_utils::TestChainSource::new(Network::Testnet);
+                       let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), self.tx_broadcaster.clone(), &self.logger, &feeest);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
-                               if let Err(_) = channel_monitor.add_monitor(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
+                               if let Err(_) = chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) {
                                        panic!();
                                }
                        }
-                       if chain_watch != *self.chain_monitor {
-                               panic!();
-                       }
+                       assert_eq!(*chain_source.watched_txn.lock().unwrap(), *self.chain_source.watched_txn.lock().unwrap());
+                       assert_eq!(*chain_source.watched_outputs.lock().unwrap(), *self.chain_source.watched_outputs.lock().unwrap());
                }
        }
 }
@@ -268,7 +292,7 @@ macro_rules! get_feerate {
 macro_rules! get_local_commitment_txn {
        ($node: expr, $channel_id: expr) => {
                {
-                       let mut monitors = $node.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                       let mut monitors = $node.chain_monitor.chain_monitor.monitors.lock().unwrap();
                        let mut commitment_txn = None;
                        for (funding_txo, monitor) in monitors.iter_mut() {
                                if funding_txo.to_channel_id() == $channel_id {
@@ -306,7 +330,7 @@ macro_rules! unwrap_send_err {
 macro_rules! check_added_monitors {
        ($node: expr, $count: expr) => {
                {
-                       let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+                       let mut added_monitors = $node.chain_monitor.added_monitors.lock().unwrap();
                        assert_eq!(added_monitors.len(), $count);
                        added_monitors.clear();
                }
@@ -345,7 +369,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
        {
-               let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -353,7 +377,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 
        node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
        {
-               let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -373,7 +397,7 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, '
 }
 
 pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
-       confirm_transaction(&node_conf.block_notifier, &node_conf.chain_monitor, &tx, tx.version);
+       confirm_transaction(node_conf, tx);
        node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
 }
 
@@ -399,7 +423,7 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv
 
 pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
        create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
-       confirm_transaction(&node_a.block_notifier, &node_a.chain_monitor, &tx, tx.version);
+       confirm_transaction(node_a, tx);
        create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
@@ -1067,9 +1091,9 @@ pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
        for i in 0..node_count {
                let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-               let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
+               let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                let logger = test_utils::TestLogger::with_id(format!("node {}", i));
-               chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator, chain_monitor, logger });
+               chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator, chain_source, logger });
        }
 
        chan_mon_cfgs
@@ -1081,39 +1105,36 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMon
        for i in 0..node_count {
                let seed = [i as u8; 32];
                let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-               let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[i].chain_monitor, &chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
-               nodes.push(NodeCfg { chain_monitor: &chanmon_cfgs[i].chain_monitor, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chan_monitor, keys_manager, node_seed: seed });
+               let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[i].chain_source), &chanmon_cfgs[i].tx_broadcaster, &chanmon_cfgs[i].logger, &chanmon_cfgs[i].fee_estimator);
+               nodes.push(NodeCfg { chain_source: &chanmon_cfgs[i].chain_source, logger: &chanmon_cfgs[i].logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chain_monitor, keys_manager, node_seed: seed });
        }
 
        nodes
 }
 
-pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
+pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChainMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator, &'b test_utils::TestLogger>> {
        let mut chanmgrs = Vec::new();
        for i in 0..node_count {
                let mut default_config = UserConfig::default();
                default_config.channel_options.announced_channel = true;
                default_config.peer_channel_config_limits.force_announced_channel_preference = false;
                default_config.own_channel_config.our_htlc_minimum_msat = 1000; // sanitization being done by the sender, to exercise receiver logic we need to lift the limit
-               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
+               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chain_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0);
                chanmgrs.push(node);
        }
 
        chanmgrs
 }
 
-pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
+pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChainMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator, &'c test_utils::TestLogger>>) -> Vec<Node<'a, 'b, 'c>> {
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
        let payment_count = Rc::new(RefCell::new(0));
 
        for i in 0..node_count {
-               let block_notifier = chaininterface::BlockNotifier::new(cfgs[i].chain_monitor);
-               block_notifier.register_listener(&cfgs[i].chan_monitor.simple_monitor as &chaininterface::ChainListener);
-               block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener);
-               let net_graph_msg_handler = NetGraphMsgHandler::new(cfgs[i].chain_monitor, cfgs[i].logger);
-               nodes.push(Node{ chain_monitor: &cfgs[i].chain_monitor, block_notifier,
-                                tx_broadcaster: cfgs[i].tx_broadcaster, chan_monitor: &cfgs[i].chan_monitor,
+               let net_graph_msg_handler = NetGraphMsgHandler::new(None, cfgs[i].logger);
+               nodes.push(Node{ chain_source: cfgs[i].chain_source,
+                                tx_broadcaster: cfgs[i].tx_broadcaster, chain_monitor: &cfgs[i].chain_monitor,
                                 keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
                                 node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
                                 network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
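A minimal sketch of how the refactored helpers above fit together in a test, assuming the functional_test_utils items are in scope (the test name is hypothetical): the TestChainMonitor acts as the ChannelManager's chain::Watch, and connect_block feeds each new block to both of them.

#[test]
fn sketch_connect_block_flow() {
	// Illustrative only: wire up two test nodes using the helpers defined above.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Open a channel and confirm the funding transaction from nodes[0]'s point of view.
	// confirm_transaction mines it at height 1 and then connects empty blocks up to height
	// CHAN_CONFIRM_DEPTH - 1, feeding each one to both the chain monitor and the node.
	let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
	confirm_transaction(&nodes[0], &tx);

	// Connect a few more empty blocks with a default prev_blockhash (parent == false),
	// as other tests in this file do.
	connect_blocks(&nodes[0], 6, CHAN_CONFIRM_DEPTH - 1, false, Default::default());
}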
index 9a3c4b793612ed6215f420599ccf657d7895f97f..59b0d071f17c6f7728f8a46ee6b36e90bfa9bcfe 100644 (file)
 //! payments/messages between them, and often checking the resulting ChannelMonitors are able to
 //! claim outputs on-chain.
 
+use chain::Watch;
+use chain::channelmonitor;
+use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
 use chain::keysinterface::{ChannelKeys, KeysInterface, SpendableOutputDescriptor};
-use chain::chaininterface;
-use chain::chaininterface::{ChainListener, ChainWatchInterfaceUtil, BlockNotifier};
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure, BREAKDOWN_TIMEOUT};
-use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
-use ln::channelmonitor;
 use ln::channel::{Channel, ChannelError};
 use ln::{chan_utils, onion_utils};
 use routing::router::{Route, RouteHop, get_route};
@@ -417,14 +416,14 @@ fn test_1_conf_open() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
-       assert!(nodes[0].chain_monitor.does_match_tx(&tx));
-       assert!(nodes[1].chain_monitor.does_match_tx(&tx));
-
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected_checked(&header, 1, &[&tx; 1], &[tx.version as usize; 1]);
+       let block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![tx],
+       };
+       connect_block(&nodes[1], &block, 1);
        nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
 
-       nodes[0].block_notifier.block_connected_checked(&header, 1, &[&tx; 1], &[tx.version as usize; 1]);
+       connect_block(&nodes[0], &block, 1);
        let (funding_locked, _) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
 
@@ -447,9 +446,12 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        if steps & 0b1000_0000 != 0{
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[0].block_notifier.block_connected_checked(&header, 1, &Vec::new(), &[0; 0]);
-               nodes[1].block_notifier.block_connected_checked(&header, 1, &Vec::new(), &[0; 0]);
+               let block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(&nodes[0], &block, 1);
+               connect_block(&nodes[1], &block, 1);
        }
 
        if steps & 0x0f == 0 { return; }
@@ -473,7 +475,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        if steps & 0x0f == 4 { return; }
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -483,7 +485,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        if steps & 0x0f == 5 { return; }
        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
        {
-               let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -503,7 +505,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx);
 
        if steps & 0x0f == 7 { return; }
-       confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &tx, tx.version);
+       confirm_transaction(&nodes[0], &tx);
        create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
 }
 
@@ -657,7 +659,7 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        // nothing happens since node[1] is in AwaitingRemoteRevoke
        nodes[1].node.send_payment(&route, our_payment_hash, &None).unwrap();
        {
-               let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 0);
                added_monitors.clear();
        }
@@ -830,8 +832,8 @@ fn pre_funding_lock_shutdown_test() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::known(), InitFeatures::known());
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![tx.clone()]}, 1);
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![tx.clone()]}, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![tx.clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![tx.clone()]}, 1);
 
        nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -1498,7 +1500,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
        assert_eq!(has_both_htlcs, 2);
 
        let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[0], 1);
 
        // Check we only broadcast 1 timeout tx
@@ -2340,7 +2342,7 @@ fn channel_monitor_network_test() {
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
                check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
        }
@@ -2357,7 +2359,7 @@ fn channel_monitor_network_test() {
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+               connect_block(&nodes[2], &Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
                check_added_monitors!(nodes[2], 1);
                test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
        }
@@ -2398,7 +2400,7 @@ fn channel_monitor_network_test() {
                claim_funds!(nodes[3], nodes[2], payment_preimage_1, 3_000_000);
 
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[3].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
+               connect_block(&nodes[3], &Block { header, txdata: vec![node_txn[0].clone()] }, 1);
                check_added_monitors!(nodes[3], 1);
 
                check_preimage_claim(&nodes[3], &node_txn);
@@ -2409,7 +2411,7 @@ fn channel_monitor_network_test() {
 
        { // Cheat and reset nodes[4]'s height to 1
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[4].block_notifier.block_connected(&Block { header, txdata: vec![] }, 1);
+               connect_block(&nodes[4], &Block { header, txdata: vec![] }, 1);
        }
 
        assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
@@ -2420,11 +2422,17 @@ fn channel_monitor_network_test() {
        // buffer space).
 
        let (close_chan_update_1, close_chan_update_2) = {
-               let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[3].block_notifier.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+               let mut block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(&nodes[3], &block, 2);
                for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
-                       header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+                       block = Block {
+                               header: BlockHeader { version: 0x20000000, prev_blockhash: block.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                               txdata: vec![],
+                       };
+                       connect_block(&nodes[3], &block, i);
                }
                let events = nodes[3].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -2451,12 +2459,18 @@ fn channel_monitor_network_test() {
                // Claim the payment on nodes[4], giving it knowledge of the preimage
                claim_funds!(nodes[4], nodes[3], payment_preimage_2, 3_000_000);
 
-               header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
 
-               nodes[4].block_notifier.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+               connect_block(&nodes[4], &block, 2);
                for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
-                       header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       nodes[4].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+                       block = Block {
+                               header: BlockHeader { version: 0x20000000, prev_blockhash: block.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                               txdata: vec![],
+                       };
+                       connect_block(&nodes[4], &block, i);
                }
                let events = nodes[4].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@ -2469,8 +2483,11 @@ fn channel_monitor_network_test() {
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
 
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[4].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![node_txn[0].clone()],
+               };
+               connect_block(&nodes[4], &block, TEST_FINAL_CLTV - 5);
 
                check_preimage_claim(&nodes[4], &node_txn);
                (close_chan_update_1, close_chan_update_2)
@@ -2516,7 +2533,7 @@ fn test_justice_tx() {
 
        {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                {
                        let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                        assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment tx
@@ -2529,13 +2546,13 @@ fn test_justice_tx() {
                check_added_monitors!(nodes[1], 1);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                check_added_monitors!(nodes[0], 1);
                // Broadcast revoked HTLC-timeout on node 1
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()] }, 1);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2563,7 +2580,7 @@ fn test_justice_tx() {
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4, 3_000_000);
        {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                {
                        let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                        assert_eq!(node_txn.len(), 2); //ChannelMonitor: penalty tx, ChannelManager: local commitment tx
@@ -2575,11 +2592,11 @@ fn test_justice_tx() {
                check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                check_added_monitors!(nodes[1], 1);
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()] }, 1);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2606,7 +2623,7 @@ fn revoked_output_claim() {
 
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
@@ -2615,7 +2632,7 @@ fn revoked_output_claim() {
        check_spends!(node_txn[1], chan_1.3);
 
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        get_announce_close_broadcast_events(&nodes, 0, 1);
        check_added_monitors!(nodes[0], 1)
 }
@@ -2652,11 +2669,11 @@ fn claim_htlc_outputs_shared_tx() {
 
        {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                check_added_monitors!(nodes[0], 1);
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                check_added_monitors!(nodes[1], 1);
-               connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2715,13 +2732,13 @@ fn claim_htlc_outputs_single_tx() {
 
        {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+               connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
                check_added_monitors!(nodes[0], 1);
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+               connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
                check_added_monitors!(nodes[1], 1);
                expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 
-               connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.block_hash());
+               connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 200, true, header.block_hash());
                expect_payment_failed!(nodes[1], payment_hash_2, true);
 
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -2764,8 +2781,8 @@ fn claim_htlc_outputs_single_tx() {
 
 #[test]
 fn test_htlc_on_chain_success() {
-       // Test that in case of a unilateral close onchain, we detect the state of output thanks to
-       // ChainWatchInterface and pass the preimage backward accordingly. So here we test that ChannelManager is
+       // Test that in case of a unilateral close onchain, we detect the state of the output and
+       // pass the preimage backward accordingly. So here we test that ChannelManager is
        // broadcasting the right event to other nodes in payment path.
        // We test with two HTLCs simultaneously as that was not handled correctly in the past.
        // A --------------------> B ----------------------> C (preimage)
@@ -2809,7 +2826,7 @@ fn test_htlc_on_chain_success() {
        assert!(updates.update_fail_malformed_htlcs.is_empty());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 
-       nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+       connect_block(&nodes[2], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
        check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
@@ -2827,16 +2844,16 @@ fn test_htlc_on_chain_success() {
        assert_eq!(node_txn[1].lock_time, 0);
 
        // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: node_txn}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: node_txn}, 1);
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
                added_monitors.clear();
        }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
-               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 2);
                assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
                assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
@@ -2900,7 +2917,7 @@ fn test_htlc_on_chain_success() {
        // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        check_spends!(commitment_tx[0], chan_1.3);
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 1 (HTLC-Success)
@@ -2919,7 +2936,7 @@ fn test_htlc_on_chain_success() {
        // we already checked the same situation with A.
 
        // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -2943,8 +2960,8 @@ fn test_htlc_on_chain_success() {
 
 #[test]
 fn test_htlc_on_chain_timeout() {
-       // Test that in case of a unilateral close onchain, we detect the state of output thanks to
-       // ChainWatchInterface and timeout the HTLC backward accordingly. So here we test that ChannelManager is
+       // Test that in case of a unilateral close onchain, we detect the state of the output and
+       // time out the HTLC backward accordingly. So here we test that ChannelManager is
        // broadcasting the right event to other nodes in payment path.
        // A ------------------> B ----------------------> C (timeout)
        //    B's commitment tx                 C's commitment tx
@@ -2987,7 +3004,7 @@ fn test_htlc_on_chain_timeout() {
                },
                _ => panic!("Unexpected event"),
        };
-       nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+       connect_block(&nodes[2], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
        check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
@@ -2997,7 +3014,7 @@ fn test_htlc_on_chain_timeout() {
 
        // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
        // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backward accordingly
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
        let timeout_tx;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -3017,8 +3034,8 @@ fn test_htlc_on_chain_timeout() {
                node_txn.clear();
        }
 
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![timeout_tx]}, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header, txdata: vec![timeout_tx]}, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false);
 
@@ -3043,7 +3060,7 @@ fn test_htlc_on_chain_timeout() {
        let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
        check_spends!(commitment_tx[0], chan_1.3);
 
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+       connect_block(&nodes[0], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
@@ -3079,8 +3096,8 @@ fn test_simple_commitment_revoked_fail_backward() {
        let (_, payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false);
 
@@ -3232,9 +3249,9 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[1], 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
@@ -3427,13 +3444,13 @@ fn test_htlc_ignore_latest_remote_commitment() {
        assert_eq!(node_txn.len(), 2);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
-       // Duplicate the block_connected call since this may happen due to other listeners
+       // Duplicate the connect_block call since this may happen due to other listeners
        // registering new transactions
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
 }
 
 #[test]
@@ -3492,8 +3509,11 @@ fn test_force_close_fail_back() {
                node_txn.remove(0)
        };
 
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected_checked(&header, 1, &[&tx], &[1]);
+       let block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![tx.clone()],
+       };
+       connect_block(&nodes[1], &block, 1);
 
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], false);
@@ -3501,11 +3521,11 @@ fn test_force_close_fail_back() {
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
-               let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
                monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
                        .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
        }
-       nodes[2].block_notifier.block_connected_checked(&header, 1, &[&tx], &[1]);
+       connect_block(&nodes[2], &block, 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        assert_eq!(node_txn[0].input.len(), 1);
@@ -3537,10 +3557,8 @@ fn test_unconf_chan() {
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                headers.push(header.clone());
        }
-       let mut height = 99;
        while !headers.is_empty() {
-               nodes[0].node.block_disconnected(&headers.pop().unwrap(), height);
-               height -= 1;
+               nodes[0].node.block_disconnected(&headers.pop().unwrap());
        }
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
@@ -3835,7 +3853,7 @@ fn test_funding_peer_disconnect() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       confirm_transaction(&nodes[0].block_notifier, &nodes[0].chain_monitor, &tx, tx.version);
+       confirm_transaction(&nodes[0], &tx);
        let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_1.len(), 1);
        match events_1[0] {
@@ -3850,7 +3868,7 @@ fn test_funding_peer_disconnect() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       confirm_transaction(&nodes[1].block_notifier, &nodes[1].chain_monitor, &tx, tx.version);
+       confirm_transaction(&nodes[1], &tx);
        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 2);
        let funding_locked = match events_2[0] {
@@ -4083,13 +4101,16 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                route_payment(&nodes[0], &[&nodes[1]], 100000).1
        };
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected_checked(&header, 101, &[], &[]);
-       nodes[1].block_notifier.block_connected_checked(&header, 101, &[], &[]);
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
+       connect_block(&nodes[0], &block, 101);
+       connect_block(&nodes[1], &block, 101);
        for i in 102..TEST_FINAL_CLTV + 100 + 1 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
-               header.prev_blockhash = header.block_hash();
-               nodes[0].block_notifier.block_connected_checked(&header, i, &[], &[]);
-               nodes[1].block_notifier.block_connected_checked(&header, i, &[], &[]);
+               block.header.prev_blockhash = block.block_hash();
+               connect_block(&nodes[0], &block, i);
+               connect_block(&nodes[1], &block, i);
        }
 
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -4154,18 +4175,21 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) {
                check_added_monitors!(nodes[1], 0);
        }
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected_checked(&header, 101, &[], &[]);
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
+       connect_block(&nodes[1], &block, 101);
        for i in 102..TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS {
-               header.prev_blockhash = header.block_hash();
-               nodes[1].block_notifier.block_connected_checked(&header, i, &[], &[]);
+               block.header.prev_blockhash = block.block_hash();
+               connect_block(&nodes[1], &block, i);
        }
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-       header.prev_blockhash = header.block_hash();
-       nodes[1].block_notifier.block_connected_checked(&header, TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS, &[], &[]);
+       block.header.prev_blockhash = block.block_hash();
+       connect_block(&nodes[1], &block, TEST_FINAL_CLTV + 100 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS);
 
        if forwarded_htlc {
                expect_pending_htlcs_forwardable!(nodes[1]);
@@ -4283,9 +4307,9 @@ fn test_no_txn_manager_serialize_deserialize() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
@@ -4294,12 +4318,12 @@ fn test_no_txn_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4310,11 +4334,11 @@ fn test_no_txn_manager_serialize_deserialize() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: config,
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4323,9 +4347,8 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
-       nodes[0].block_notifier.register_listener(nodes[0].node);
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
 
@@ -4358,9 +4381,9 @@ fn test_manager_serialize_deserialize_events() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let fee_estimator: test_utils::TestFeeEstimator;
        let logger: test_utils::TestLogger;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe
@@ -4381,7 +4404,7 @@ fn test_manager_serialize_deserialize_events() {
 
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
        {
-               let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -4389,7 +4412,7 @@ fn test_manager_serialize_deserialize_events() {
 
        node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id()));
        {
-               let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+               let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
                assert_eq!(added_monitors.len(), 1);
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
@@ -4402,12 +4425,12 @@ fn test_manager_serialize_deserialize_events() {
        // Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
        logger = test_utils::TestLogger::new();
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4418,11 +4441,11 @@ fn test_manager_serialize_deserialize_events() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: config,
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4433,7 +4456,7 @@ fn test_manager_serialize_deserialize_events() {
 
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-       assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
 
        // After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager
@@ -4448,7 +4471,6 @@ fn test_manager_serialize_deserialize_events() {
        };
 
        // Make sure the channel is functioning as though the de/serialization never happened
-       nodes[0].block_notifier.register_listener(nodes[0].node);
        assert_eq!(nodes[0].node.list_channels().len(), 1);
        check_added_monitors!(nodes[0], 1);
 
@@ -4480,9 +4502,9 @@ fn test_simple_manager_serialize_deserialize() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
@@ -4493,12 +4515,12 @@ fn test_simple_manager_serialize_deserialize() {
 
        let nodes_0_serialized = nodes[0].node.encode();
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
        assert!(chan_0_monitor_read.is_empty());
@@ -4508,11 +4530,11 @@ fn test_simple_manager_serialize_deserialize() {
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: UserConfig::default(),
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       monitor: nodes[0].chan_monitor,
+                       chain_monitor: nodes[0].chain_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: &logger,
                        channel_monitors,
@@ -4521,7 +4543,7 @@ fn test_simple_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
+       assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -4539,16 +4561,16 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let logger: test_utils::TestLogger;
        let fee_estimator: test_utils::TestFeeEstimator;
-       let new_chan_monitor: test_utils::TestChannelMonitor;
+       let new_chain_monitor: test_utils::TestChainMonitor;
        let keys_manager: test_utils::TestKeysInterface;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
        create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
        let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
 
        let mut node_0_stale_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write_for_disk(&mut writer).unwrap();
                node_0_stale_monitors_serialized.push(writer.0);
@@ -4567,7 +4589,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
        // nodes[3])
        let mut node_0_monitors_serialized = Vec::new();
-       for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+       for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
                let mut writer = test_utils::TestVecWriter(Vec::new());
                monitor.1.write_for_disk(&mut writer).unwrap();
                node_0_monitors_serialized.push(writer.0);
@@ -4575,8 +4597,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        logger = test_utils::TestLogger::new();
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
-       nodes[0].chan_monitor = &new_chan_monitor;
+       new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator);
+       nodes[0].chain_monitor = &new_chain_monitor;
 
        let mut node_0_stale_monitors = Vec::new();
        for serialized in node_0_stale_monitors_serialized.iter() {
@@ -4598,11 +4620,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        let mut nodes_0_read = &nodes_0_serialized[..];
        if let Err(msgs::DecodeError::InvalidValue) =
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
                keys_manager: &keys_manager,
                fee_estimator: &fee_estimator,
-               monitor: nodes[0].chan_monitor,
+               chain_monitor: nodes[0].chain_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
                channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4612,11 +4634,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 
        let mut nodes_0_read = &nodes_0_serialized[..];
        let (_, nodes_0_deserialized_tmp) =
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
                keys_manager: &keys_manager,
                fee_estimator: &fee_estimator,
-               monitor: nodes[0].chan_monitor,
+               chain_monitor: nodes[0].chain_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: &logger,
                channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
@@ -4632,7 +4654,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        for monitor in node_0_monitors.drain(..) {
-               assert!(nodes[0].chan_monitor.add_monitor(monitor.get_funding_txo().0, monitor).is_ok());
+               assert!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor).is_ok());
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
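Serializing every outstanding monitor now iterates the `monitors` map on the inner `chain_monitor` rather than the old `simple_monitor`. A sketch of the pattern used twice in the hunks above:

    let mut serialized_monitors = Vec::new();
    for (_funding_txo, monitor) in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
        let mut writer = test_utils::TestVecWriter(Vec::new());
        monitor.write_for_disk(&mut writer).unwrap();
        serialized_monitors.push(writer.0);
    }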
@@ -4662,7 +4684,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 macro_rules! check_spendable_outputs {
        ($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => {
                {
-                       let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
+                       let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
                        let mut txn = Vec::new();
                        for event in events {
                                match event {
@@ -4796,8 +4818,8 @@ fn test_claim_sizeable_push_msat() {
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone()] }, 0);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
@@ -4824,10 +4846,10 @@ fn test_claim_on_remote_sizeable_push_msat() {
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
+       connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone()] }, 0);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 2);
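connect_blocks is the multi-block counterpart of connect_block: it connects a run of empty blocks and returns the resulting tip hash, which later hunks bind (e.g. `let parent_hash = connect_blocks(..)`). A hypothetical sketch, assuming it loops over connect_block and that the boolean controls whether the first header chains off the supplied hash:

    // Hypothetical shape of the connect_blocks helper used throughout this file.
    fn connect_blocks(node: &Node, depth: u32, height: u32,
                      parent: bool, prev_blockhash: BlockHash) -> BlockHash {
        let mut prev = if parent { prev_blockhash } else { Default::default() };
        for i in 1..depth + 1 {
            let header = BlockHeader { version: 0x20000000, prev_blockhash: prev,
                merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
            connect_block(node, &Block { header, txdata: vec![] }, height + i);
            prev = header.block_hash();
        }
        prev
    }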
@@ -4853,14 +4875,14 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 0);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 0);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 3);
@@ -4889,7 +4911,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        assert!(nodes[1].node.claim_funds(payment_preimage, &None, 3_000_000));
        check_added_monitors!(nodes[1], 1);
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -4910,8 +4932,8 @@ fn test_static_spendable_outputs_preimage_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
@@ -4939,7 +4961,7 @@ fn test_static_spendable_outputs_timeout_tx() {
 
        // Settle A's commitment tx on B' chain
        let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 0);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()] }, 0);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -4956,8 +4978,8 @@ fn test_static_spendable_outputs_timeout_tx() {
        check_spends!(node_txn[2], node_txn[1]);
 
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
        expect_payment_failed!(nodes[1], our_payment_hash, true);
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
@@ -4983,7 +5005,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 0);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 0);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
@@ -4993,8 +5015,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        check_spends!(node_txn[0], revoked_local_txn[0]);
 
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header: header_1, txdata: vec![node_txn[0].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
@@ -5020,7 +5042,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        // A will generate HTLC-Timeout from revoked commitment tx
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -5032,7 +5054,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        check_spends!(revoked_htlc_txn[1], chan_1.3);
 
        // B will generate justice tx from A's revoked commitment/HTLC tx
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 0);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 0);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
@@ -5057,8 +5079,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        check_spends!(node_txn[2], chan_1.3);
 
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[1].clone()] }, 1);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header: header_1, txdata: vec![node_txn[1].clone()] }, 1);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        // Check B's ChannelMonitor was able to generate the right spendable output descriptor
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
@@ -5089,7 +5111,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        // B will generate HTLC-Success from revoked commitment tx
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -5104,7 +5126,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH
 
        // A will generate justice tx from B's revoked commitment/HTLC tx
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
+       connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -5129,8 +5151,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        check_spends!(node_txn[2], chan_1.3);
 
        let header_1 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_1, txdata: vec![node_txn[1].clone()] }, 1);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
+       connect_block(&nodes[0], &Block { header: header_1, txdata: vec![node_txn[1].clone()] }, 1);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 
        // Note that nodes[0]'s tx_broadcaster is still locked, so if we get here the channelmonitor
        // didn't try to generate any new transactions.
@@ -5147,8 +5169,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 
 #[test]
 fn test_onchain_to_onchain_claim() {
-       // Test that in case of channel closure, we detect the state of output thanks to
-       // ChainWatchInterface and claim HTLC on downstream peer's remote commitment tx.
+       // Test that in case of channel closure, we detect the state of the output and claim the HTLC
+       // on the downstream peer's remote commitment tx.
        // First, have C claim an HTLC against its own latest commitment transaction.
        // Then, broadcast these to B, which should update the monitor downstream on the A<->B
        // channel.
@@ -5180,7 +5202,7 @@ fn test_onchain_to_onchain_claim() {
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        assert!(updates.update_fail_malformed_htlcs.is_empty());
 
-       nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+       connect_block(&nodes[2], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
        check_added_monitors!(nodes[2], 1);
 
@@ -5196,7 +5218,7 @@ fn test_onchain_to_onchain_claim() {
        assert_eq!(c_txn[0].lock_time, 0); // Success tx
 
        // So we broadcast C's commitment tx and HTLC-Success on B's chain; we should be able to extract the preimage and update the downstream monitor
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1);
        {
                let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-timeout tx
@@ -5231,7 +5253,7 @@ fn test_onchain_to_onchain_claim() {
        };
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
        assert_eq!(b_txn.len(), 3);
@@ -5267,7 +5289,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        check_spends!(commitment_txn[0], chan_2.3);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
@@ -5290,7 +5312,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        }
 
        nodes[2].node.claim_funds(our_payment_preimage, &None, 900_000);
-       nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+       connect_block(&nodes[2], &Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[2], 3);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -5316,8 +5338,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        check_spends!(htlc_success_txn[0], commitment_txn[0]);
        check_spends!(htlc_success_txn[1], commitment_txn[0]);
 
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header, txdata: vec![htlc_timeout_tx] }, 200);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 200, true, header.block_hash());
        expect_pending_htlcs_forwardable!(nodes[1]);
        let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(htlc_updates.update_add_htlcs.is_empty());
@@ -5342,7 +5364,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        expect_payment_failed!(nodes[0], duplicate_payment_hash, false);
 
        // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200);
+       connect_block(&nodes[1], &Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200);
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
@@ -5382,7 +5404,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        nodes[1].node.claim_funds(payment_preimage, &None, 9_000_000);
        check_added_monitors!(nodes[1], 1);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![local_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -5402,8 +5424,8 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        };
 
        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header: header_201, txdata: node_txn.clone() }, 201);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
+       connect_block(&nodes[1], &Block { header: header_201, txdata: node_txn.clone() }, 201);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
 
        // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
        let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
@@ -5543,11 +5565,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        if announce_latest {
-               nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![ds_last_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[2], &Block { header, txdata: vec![ds_last_commitment_tx[0].clone()]}, 1);
        } else {
-               nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![ds_prev_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[2], &Block { header, txdata: vec![ds_prev_commitment_tx[0].clone()]}, 1);
        }
-       connect_blocks(&nodes[2].block_notifier, ANTI_REORG_DELAY - 1, 1, true,  header.block_hash());
+       connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
        check_closed_broadcast!(nodes[2], false);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 3);
@@ -5685,7 +5707,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+       connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -5698,8 +5720,8 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        };
 
        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
+       connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
        expect_payment_failed!(nodes[0], our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
@@ -5721,8 +5743,8 @@ fn test_key_derivation_params() {
        // We manually create the node configuration to backup the seed.
        let seed = [42; 32];
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-       let chan_monitor = test_utils::TestChannelMonitor::new(&chanmon_cfgs[0].chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
-       let node = NodeCfg { chain_monitor: &chanmon_cfgs[0].chain_monitor, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chan_monitor, keys_manager, node_seed: seed };
+       let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator);
+       let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager, node_seed: seed };
        let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        node_cfgs.remove(0);
        node_cfgs.insert(0, node);
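In test_key_derivation_params the hand-built NodeCfg now carries a `chain_source` and a TestChainMonitor constructed from it, replacing the old `chain_monitor`/`chan_monitor` pair. The same construction from the hunk above, reflowed for readability:

    let chain_monitor = test_utils::TestChainMonitor::new(
        Some(&chanmon_cfgs[0].chain_source),
        &chanmon_cfgs[0].tx_broadcaster,
        &chanmon_cfgs[0].logger,
        &chanmon_cfgs[0].fee_estimator,
    );
    let node = NodeCfg {
        chain_source: &chanmon_cfgs[0].chain_source,
        logger: &chanmon_cfgs[0].logger,
        tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster,
        fee_estimator: &chanmon_cfgs[0].fee_estimator,
        chain_monitor,
        keys_manager,
        node_seed: seed,
    };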
@@ -5755,7 +5777,7 @@ fn test_key_derivation_params() {
 
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![local_txn_1[0].clone()] }, 200);
+       connect_block(&nodes[0], &Block { header, txdata: vec![local_txn_1[0].clone()] }, 200);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
@@ -5768,8 +5790,8 @@ fn test_key_derivation_params() {
        };
 
        let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
+       connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
        expect_payment_failed!(nodes[0], our_payment_hash, true);
 
        // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
@@ -5794,15 +5816,15 @@ fn test_static_output_closing_tx() {
        let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![closing_tx.clone()] }, 0);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
+       connect_block(&nodes[0], &Block { header, txdata: vec![closing_tx.clone()] }, 0);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[0], 2, node_cfgs[0].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], closing_tx);
 
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![closing_tx.clone()] }, 0);
-       connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
+       connect_block(&nodes[1], &Block { header, txdata: vec![closing_tx.clone()] }, 0);
+       connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
 
        let spend_txn = check_spendable_outputs!(nodes[1], 2, node_cfgs[1].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
@@ -5840,10 +5862,13 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0);
        check_added_monitors!(nodes[1], 1);
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
        for i in 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + CHAN_CONFIRM_DEPTH + 1 {
-               nodes[1].block_notifier.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
-               header.prev_blockhash = header.block_hash();
+               connect_block(&nodes[1], &block, i);
+               block.header.prev_blockhash = block.block_hash();
        }
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], false);
@@ -5873,7 +5898,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 
        for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: Vec::new()}, i);
+               connect_block(&nodes[0], &Block { header, txdata: Vec::new()}, i);
                header.prev_blockhash = header.block_hash();
        }
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
@@ -5914,10 +5939,13 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
                check_added_monitors!(nodes[0], 1);
        }
 
-       let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let mut block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
        for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
-               nodes[0].block_notifier.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
-               header.prev_blockhash = header.block_hash();
+               connect_block(&nodes[0], &block, i);
+               block.header.prev_blockhash = block.block_hash();
        }
        if !check_revoke_no_close {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
@@ -7096,16 +7124,16 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 
        if announce_latest {
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_last_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![as_last_commitment_tx[0].clone()]}, 1);
        } else {
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_prev_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![as_prev_commitment_tx[0].clone()]}, 1);
        }
 
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 1, true,  header.block_hash());
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
        let events = nodes[0].node.get_and_clear_pending_events();
        // Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed by the timeout tx
        assert_eq!(events.len(), 2);
@@ -7173,11 +7201,11 @@ fn test_no_failure_dust_htlc_local_commitment() {
        };
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].chan_monitor.simple_monitor.block_connected(&header, 1, &[&dummy_tx], &[1;1]);
+       nodes[0].chain_monitor.chain_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
        // We broadcast a few more blocks to check everything is all right
-       connect_blocks(&nodes[0].block_notifier, 20, 1, true,  header.block_hash());
+       connect_blocks(&nodes[0], 20, 1, true, header.block_hash());
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
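The dust-HTLC check above drives the monitor directly via ChainMonitor::block_connected, whose transaction data is now a slice of (index-within-block, &Transaction) pairs rather than the old parallel &[&Transaction] and index arrays. The call shape, using the header literal and `dummy_tx` from the hunk:

    let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(),
        merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
    // One matched transaction at position 0 within the block, connected at height 1.
    nodes[0].chain_monitor.chain_monitor.block_connected(&header, &[(0, &dummy_tx)], 1);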
 
@@ -7217,38 +7245,38 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let mut timeout_tx = Vec::new();
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![as_commitment_tx[0].clone()]}, 1);
                check_closed_broadcast!(nodes[0], false);
                check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
-               let parent_hash  = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.block_hash());
+               let parent_hash = connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 2, true, header.block_hash());
                expect_payment_failed!(nodes[0], dust_hash, true);
                assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
                // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
                let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-               nodes[0].block_notifier.block_connected(&Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7);
+               connect_block(&nodes[0], &Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7);
                let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 8, true, header_3.block_hash());
+               connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 8, true, header_3.block_hash());
                expect_payment_failed!(nodes[0], non_dust_hash, true);
        } else {
                // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
-               nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![bs_commitment_tx[0].clone()]}, 1);
+               connect_block(&nodes[0], &Block { header, txdata: vec![bs_commitment_tx[0].clone()]}, 1);
                check_closed_broadcast!(nodes[0], false);
                check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
-               let parent_hash  = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.block_hash());
+               let parent_hash = connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 2, true, header.block_hash());
                let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                if !revoked {
                        expect_payment_failed!(nodes[0], dust_hash, true);
                        assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
                        // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
-                       nodes[0].block_notifier.block_connected(&Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7);
+                       connect_block(&nodes[0], &Block { header: header_2, txdata: vec![timeout_tx[0].clone()]}, 7);
                        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                        let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-                       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 8, true, header_3.block_hash());
+                       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 8, true, header_3.block_hash());
                        expect_payment_failed!(nodes[0], non_dust_hash, true);
                } else {
                        // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
@@ -7438,7 +7466,7 @@ fn test_data_loss_protect() {
        let logger;
        let fee_estimator;
        let tx_broadcaster;
-       let chain_monitor;
+       let chain_source;
        let monitor;
        let node_state_0;
        let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -7450,8 +7478,8 @@ fn test_data_loss_protect() {
 
        // Cache node A state before any channel update
        let previous_node_state = nodes[0].node.encode();
-       let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
-       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+       let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
+       nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chain_monitor_state).unwrap();
 
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
@@ -7461,19 +7489,19 @@ fn test_data_loss_protect() {
 
        // Restore node A from previous state
        logger = test_utils::TestLogger::with_id(format!("node {}", 0));
-       let mut chan_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0)).unwrap().1;
-       chain_monitor = ChainWatchInterfaceUtil::new(Network::Testnet);
+       let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
+       chain_source = test_utils::TestChainSource::new(Network::Testnet);
        tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
        fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
        keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
-       monitor = test_utils::TestChannelMonitor::new(&chain_monitor, &tx_broadcaster, &logger, &fee_estimator);
+       monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator);
        node_state_0 = {
                let mut channel_monitors = HashMap::new();
-               channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chan_monitor);
-               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+               channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
+               <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
                        keys_manager: &keys_manager,
                        fee_estimator: &fee_estimator,
-                       monitor: &monitor,
+                       chain_monitor: &monitor,
                        logger: &logger,
                        tx_broadcaster: &tx_broadcaster,
                        default_config: UserConfig::default(),
@@ -7481,13 +7509,9 @@ fn test_data_loss_protect() {
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
-       assert!(monitor.add_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
-       nodes[0].chan_monitor = &monitor;
-       nodes[0].chain_monitor = &chain_monitor;
-
-       nodes[0].block_notifier = BlockNotifier::new(&nodes[0].chain_monitor);
-       nodes[0].block_notifier.register_listener(&nodes[0].chan_monitor.simple_monitor);
-       nodes[0].block_notifier.register_listener(nodes[0].node);
+       assert!(monitor.watch_channel(OutPoint { txid: chan.3.txid(), index: 0 }, chain_monitor).is_ok());
+       nodes[0].chain_monitor = &monitor;
+       nodes[0].chain_source = &chain_source;
 
        check_added_monitors!(nodes[0], 1);
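For context, the restart path exercised above now has three steps: deserialize the persisted ChannelMonitor, build the new watcher (a ChainMonitor, which implements chain::Watch), and hand that same watcher to ChannelManagerReadArgs under the chain_monitor field before re-registering the monitor with watch_channel. A rough sketch of the call shape, reusing the names from this hunk (funding_outpoint stands in for the OutPoint built from chan.3 above; this is illustrative, not a standalone test):

    // Deserialize the monitor state that was persisted before the simulated crash.
    let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(
        &mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
    // The watcher is now a ChainMonitor; the chain source argument is optional.
    let monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator);
    // ChannelManagerReadArgs takes the watcher as `chain_monitor` and borrows the monitors being reloaded;
    // the map is consumed by the read call, ending the &mut borrow before watch_channel below.
    let mut channel_monitors = HashMap::new();
    channel_monitors.insert(funding_outpoint, &mut chain_monitor);
    // ... <(BlockHash, ChannelManager<...>)>::read(..., ChannelManagerReadArgs { chain_monitor: &monitor, channel_monitors, .. }) ...
    // Once the manager is rebuilt, give the restored monitor back to the watcher so it is tracked again.
    assert!(monitor.watch_channel(funding_outpoint, chain_monitor).is_ok());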
 
@@ -7535,8 +7559,8 @@ fn test_data_loss_protect() {
        check_spends!(node_txn[0], chan.3);
        assert_eq!(node_txn[0].output.len(), 2);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()]}, 0);
-       connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
+       connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[0].clone()]}, 0);
+       connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
        let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
        assert_eq!(spend_txn.len(), 1);
        check_spends!(spend_txn[0], node_txn[0]);
@@ -7684,12 +7708,12 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        }
 
        // Connect blocks to change height_timer range to see if we use the right soonest_timelock
-       let header_114 = connect_blocks(&nodes[1].block_notifier, 114, 0, false, Default::default());
+       let header_114 = connect_blocks(&nodes[1], 114, 0, false, Default::default());
 
        // Actually revoke tx by claiming a HTLC
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
        check_added_monitors!(nodes[1], 1);
 
        // One or more justice tx should have been broadcast, check it
@@ -7708,7 +7732,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        };
 
        // After exhaustion of height timer, a new bumped justice tx should have been broadcast, check it
-       let header = connect_blocks(&nodes[1].block_notifier, 3, 115,  true, header.block_hash());
+       let header = connect_blocks(&nodes[1], 3, 115,  true, header.block_hash());
        let mut penalty_2 = penalty_1;
        let mut feerate_2 = 0;
        {
@@ -7731,7 +7755,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        assert_ne!(feerate_2, 0);
 
        // After exhaustion of height timer for a 2nd time, a new bumped justice tx should have been broadcast, check it
-       connect_blocks(&nodes[1].block_notifier, 3, 118, true, header);
+       connect_blocks(&nodes[1], 3, 118, true, header);
        let penalty_3;
        let mut feerate_3 = 0;
        {
@@ -7781,7 +7805,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
 
@@ -7805,10 +7829,10 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        // Broadcast set of revoked txn on A
        let header_128 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_128, txdata: vec![revoked_local_txn[0].clone()] }, 128);
+       connect_block(&nodes[0], &Block { header: header_128, txdata: vec![revoked_local_txn[0].clone()] }, 128);
        expect_pending_htlcs_forwardable_ignore!(nodes[0]);
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, 129);
+       connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()] }, 129);
        let first;
        let feerate_1;
        let penalty_txn;
@@ -7859,9 +7883,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 
        // Connect one more block to see if bumped penalty txn are issued for HTLC txn
        let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_130, txdata: penalty_txn }, 130);
+       connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
        let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_131, txdata: Vec::new() }, 131);
+       connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() }, 131);
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
@@ -7880,9 +7904,9 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        };
 
        // Few more blocks to confirm penalty txn
-       let header_135 = connect_blocks(&nodes[0].block_notifier, 4, 131, true, header_131.block_hash());
+       let header_135 = connect_blocks(&nodes[0], 4, 131, true, header_131.block_hash());
        assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
-       let header_144 = connect_blocks(&nodes[0].block_notifier, 9, 135, true, header_135);
+       let header_144 = connect_blocks(&nodes[0], 9, 135, true, header_135);
        let node_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 1);
@@ -7900,8 +7924,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        };
        // Broadcast claim txn and confirm blocks to avoid further bumps on these outputs
        let header_145 = BlockHeader { version: 0x20000000, prev_blockhash: header_144, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_145, txdata: node_txn }, 145);
-       connect_blocks(&nodes[0].block_notifier, 20, 145, true, header_145.block_hash());
+       connect_block(&nodes[0], &Block { header: header_145, txdata: node_txn }, 145);
+       connect_blocks(&nodes[0], 20, 145, true, header_145.block_hash());
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // We verify that no new transaction has been broadcast because previously
@@ -7944,7 +7968,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        // Claim a HTLC without revocation (provide B monitor with preimage)
        nodes[1].node.claim_funds(payment_preimage, &None, 3_000_000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
+       connect_block(&nodes[1], &Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
        check_added_monitors!(nodes[1], 2);
 
        // One or more claim tx should have been broadcast, check it
@@ -7989,7 +8013,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        assert_ne!(feerate_preimage, 0);
 
        // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
-       connect_blocks(&nodes[1].block_notifier, 15, 1,  true, header.block_hash());
+       connect_blocks(&nodes[1], 15, 1,  true, header.block_hash());
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 2);
@@ -8053,7 +8077,7 @@ fn test_set_outpoints_partial_claiming() {
        check_spends!(remote_txn[2], remote_txn[0]);
 
        // Connect blocks on node A to advance height towards TEST_FINAL_CLTV
-       let prev_header_100 = connect_blocks(&nodes[1].block_notifier, 100, 0, false, Default::default());
+       let prev_header_100 = connect_blocks(&nodes[1], 100, 0, false, Default::default());
        // Provide node A with both preimages
        nodes[0].node.claim_funds(payment_preimage_1, &None, 3_000_000);
        nodes[0].node.claim_funds(payment_preimage_2, &None, 3_000_000);
@@ -8063,7 +8087,7 @@ fn test_set_outpoints_partial_claiming() {
 
        // Connect a block containing the commitment transaction on node A
        let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
+       connect_block(&nodes[0], &Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
@@ -8080,7 +8104,7 @@ fn test_set_outpoints_partial_claiming() {
        }
 
        // Connect blocks on node B
-       connect_blocks(&nodes[1].block_notifier, 135, 0, false, Default::default());
+       connect_blocks(&nodes[1], 135, 0, false, Default::default());
        check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
@@ -8096,7 +8120,7 @@ fn test_set_outpoints_partial_claiming() {
 
        // Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
        let header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
+       connect_block(&nodes[0], &Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 1);
@@ -8107,7 +8131,7 @@ fn test_set_outpoints_partial_claiming() {
        nodes[0].node.get_and_clear_pending_msg_events();
 
        // Disconnect last block on node A, should regenerate a claiming tx with HTLC dropped
-       nodes[0].block_notifier.block_disconnected(&header, 102);
+       disconnect_block(&nodes[0], &header, 102);
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 1);
@@ -8117,8 +8141,8 @@ fn test_set_outpoints_partial_claiming() {
        }
 
        //// Disconnect one more block and then reconnect multiple blocks; no transaction should be generated
-       nodes[0].block_notifier.block_disconnected(&header, 101);
-       connect_blocks(&nodes[1].block_notifier, 15, 101, false, prev_header_100);
+       disconnect_block(&nodes[0], &header, 101);
+       connect_blocks(&nodes[1], 15, 101, false, prev_header_100);
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
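A note on the helpers used throughout these hunks: the tests now drive the chain through connect_block, connect_blocks and disconnect_block, which take the whole test Node rather than its former block_notifier. Roughly (block contents, heights, n and parent_hash are placeholders; connect_blocks appears to return the hash of the last header it connected, which later hunks feed back in as prev_blockhash):

    // Connect a single block containing specific transactions at `height`.
    connect_block(&nodes[0], &Block { header, txdata: vec![tx.clone()] }, height);
    // Connect `n` empty blocks starting at `height`, optionally chaining onto `parent_hash`,
    // and remember the resulting tip hash for building later headers.
    let tip = connect_blocks(&nodes[0], n, height, true, parent_hash);
    // Undo a previously connected block during a reorg.
    disconnect_block(&nodes[0], &header, height);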
@@ -8178,11 +8202,11 @@ fn test_bump_txn_sanitize_tracking_maps() {
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 9_000_000);
 
        // Broadcast set of revoked txn on A
-       let header_128 = connect_blocks(&nodes[0].block_notifier, 128, 0,  false, Default::default());
+       let header_128 = connect_blocks(&nodes[0], 128, 0,  false, Default::default());
        expect_pending_htlcs_forwardable_ignore!(nodes[0]);
 
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
+       connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
        check_closed_broadcast!(nodes[0], false);
        check_added_monitors!(nodes[0], 1);
        let penalty_txn = {
@@ -8196,10 +8220,10 @@ fn test_bump_txn_sanitize_tracking_maps() {
                penalty_txn
        };
        let header_130 = BlockHeader { version: 0x20000000, prev_blockhash: header_129.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       nodes[0].block_notifier.block_connected(&Block { header: header_130, txdata: penalty_txn }, 130);
-       connect_blocks(&nodes[0].block_notifier, 5, 130,  false, header_130.block_hash());
+       connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
+       connect_blocks(&nodes[0], 5, 130,  false, header_130.block_hash());
        {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
                        assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
                        assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
@@ -8328,23 +8352,23 @@ fn test_update_err_monitor_lockdown() {
        // Route a HTLC from node 0 to node 1 (but don't settle)
        let preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
 
-       // Copy SimpleManyChannelMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
+       // Copy ChainMonitor to simulate a watchtower and update block height of node 0 until its ChannelMonitor timeout HTLC onchain
+       let chain_source = test_utils::TestChainSource::new(Network::Testnet);
        let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
-       let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
        let watchtower = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
-               assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok());
+               let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower.simple_monitor.block_connected(&header, 200, &vec![], &vec![]);
+       watchtower.chain_monitor.block_connected(&header, &[], 200);
 
        // Try to update ChannelMonitor
        assert!(nodes[1].node.claim_funds(preimage, &None, 9_000_000));
@@ -8354,8 +8378,8 @@ fn test_update_err_monitor_lockdown() {
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
-                       if let Err(_) =  watchtower.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chan_monitor.update_monitor(outpoint, update) {} else { assert!(false); }
+                       if let Err(_) =  watchtower.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
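The watchtower simulation above is a compact example of the new chain::Watch flow: copy a live ChannelMonitor by round-tripping it through its serialization, register the copy with a second ChainMonitor via watch_channel, then drive block connections and ChannelMonitorUpdates independently. A watchtower that has already reached the HTLC-timeout height rejects the later update, while the in-sync watcher applies it. Sketch, using the same names as the hunk (update is the ChannelMonitorUpdate produced by commitment_signed; illustrative only):

    // Clone the live monitor by serializing and re-reading it.
    let mut w = test_utils::TestVecWriter(Vec::new());
    monitor.write_for_disk(&mut w).unwrap();
    let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
        &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
    // Register it with a stand-alone ChainMonitor playing the watchtower role.
    let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
    assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
    // Advancing the watchtower past the HTLC timeout makes it refuse further off-chain updates...
    watchtower.chain_monitor.block_connected(&header, &[], 200);
    assert!(watchtower.chain_monitor.update_channel(outpoint, update.clone()).is_err());
    // ...while the node's own, in-sync watcher still accepts the same update.
    assert!(nodes[0].chain_monitor.update_channel(outpoint, update).is_ok());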
@@ -8386,23 +8410,23 @@ fn test_concurrent_monitor_claim() {
        // Route a HTLC from node 0 to node 1 (but don't settle)
        route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
 
-       // Copy SimpleManyChannelMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain
+       // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain
+       let chain_source = test_utils::TestChainSource::new(Network::Testnet);
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
-       let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
        let watchtower_alice = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
-               assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok());
+               let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower_alice.simple_monitor.block_connected(&header, 135, &vec![], &vec![]);
+       watchtower_alice.chain_monitor.block_connected(&header, &vec![], 135);
 
        // Watchtower Alice should have broadcast a commitment/HTLC-timeout
        {
@@ -8411,23 +8435,23 @@ fn test_concurrent_monitor_claim() {
                txn.clear();
        }
 
-       // Copy SimpleManyChannelMonitor to simulate watchtower Bob and make it receive a commitment update first.
+       // Copy ChainMonitor to simulate watchtower Bob and make it receive a commitment update first.
+       let chain_source = test_utils::TestChainSource::new(Network::Testnet);
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
-       let chain_monitor = chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet);
        let watchtower_bob = {
-               let monitors = nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap();
+               let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&outpoint).unwrap();
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
                                &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
                assert!(new_monitor == *monitor);
-               let watchtower = test_utils::TestChannelMonitor::new(&chain_monitor, &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
-               assert!(watchtower.add_monitor(outpoint, new_monitor).is_ok());
+               let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator);
+               assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
                watchtower
        };
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-       watchtower_bob.simple_monitor.block_connected(&header, 134, &vec![], &vec![]);
+       watchtower_bob.chain_monitor.block_connected(&header, &vec![], 134);
 
        // Route another payment to generate another update with the previous HTLC still pending
        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -8444,16 +8468,16 @@ fn test_concurrent_monitor_claim() {
        if let Some(ref mut channel) = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2) {
                if let Ok((_, _, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].fee_estimator, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
-                       if let Err(_) =  watchtower_alice.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = watchtower_bob.simple_monitor.update_monitor(outpoint, update.clone()) {} else { assert!(false); }
-                       if let Ok(_) = nodes[0].chan_monitor.update_monitor(outpoint, update) {} else { assert!(false); }
+                       if let Err(_) =  watchtower_alice.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = watchtower_bob.chain_monitor.update_channel(outpoint, update.clone()) {} else { assert!(false); }
+                       if let Ok(_) = nodes[0].chain_monitor.update_channel(outpoint, update) {} else { assert!(false); }
                } else { assert!(false); }
        } else { assert!(false); };
        // Our local monitor is in-sync and hasn't yet processed the timeout
        check_added_monitors!(nodes[0], 1);
 
        //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
-       watchtower_bob.simple_monitor.block_connected(&header, 135, &vec![], &vec![]);
+       watchtower_bob.chain_monitor.block_connected(&header, &vec![], 135);
 
        // Watchtower Bob should have broadcast a commitment/HTLC-timeout
        let bob_state_y;
@@ -8465,7 +8489,7 @@ fn test_concurrent_monitor_claim() {
        };
 
        // We confirm Bob's state Y on Alice, she should broadcast a HTLC-timeout
-       watchtower_alice.simple_monitor.block_connected(&header, 136, &vec![&bob_state_y][..], &vec![]);
+       watchtower_alice.chain_monitor.block_connected(&header, &vec![(0, &bob_state_y)], 136);
        {
                let htlc_txn = chanmon_cfgs[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // We broadcast the transaction twice, once due to the HTLC-timeout, once due
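Also visible in the hunks above is the new listener signature: ChainMonitor::block_connected now takes the header, then the transaction data as (index-within-block, &Transaction) pairs, then the height, instead of the old (header, height, txn, indexes) ordering. Approximately (header, funding_tx and height are placeholders):

    // Old shape: block_connected(&header, height, &txn, &indexes)
    // New shape: each relevant transaction is paired with its position in the block.
    let txdata: Vec<(usize, &Transaction)> = vec![(0, &funding_tx)];
    watchtower.chain_monitor.block_connected(&header, &txdata, height);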
index 9bb8438f2000d3d51daeacb1028e49b137f3cbf1..cd959a74dc53cc0e176a25e175735011aea6a8ec 100644 (file)
@@ -19,7 +19,6 @@
 //! call into your NetGraphMsgHandler.
 
 pub mod channelmanager;
-pub mod channelmonitor;
 pub mod msgs;
 pub mod peer_handler;
 pub mod chan_utils;
index cad5cc1bb2ea94ee9d386db96ed5a8775b5c64ef..2f1565631ea9fe9ce94244dd3bab6ab71f480a75 100644 (file)
@@ -22,11 +22,11 @@ use bitcoin::secp256k1::{Secp256k1, Signature};
 use bitcoin::secp256k1;
 
 use ln::msgs::DecodeError;
-use ln::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
 use ln::channelmanager::PaymentPreimage;
 use ln::chan_utils;
 use ln::chan_utils::{TxCreationKeys, HolderCommitmentTransaction};
 use chain::chaininterface::{FeeEstimator, BroadcasterInterface, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
+use chain::channelmonitor::{ANTI_REORG_DELAY, CLTV_SHARED_CLAIM_BUFFER, InputMaterial, ClaimRequest};
 use chain::keysinterface::ChannelKeys;
 use util::logger::Logger;
 use util::ser::{Readable, Writer, Writeable};
@@ -405,7 +405,7 @@ impl<ChanSigner: ChannelKeys + Readable> Readable for OnchainTxHandler<ChanSigne
 }
 
 impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
-       pub(super) fn new(destination_script: Script, keys: ChanSigner, on_holder_tx_csv: u16) -> Self {
+       pub(crate) fn new(destination_script: Script, keys: ChanSigner, on_holder_tx_csv: u16) -> Self {
 
                let key_storage = keys;
 
@@ -425,7 +425,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                }
        }
 
-       pub(super) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
+       pub(crate) fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
                let mut tx_weight = 2; // count segwit flags
                for inp in inputs {
                        // We use expected weight (and not actual) as signatures and time lock delays may vary
@@ -657,7 +657,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                None
        }
 
-       pub(super) fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+       pub(crate) fn block_connected<B: Deref, F: Deref, L: Deref>(&mut self, txn_matched: &[&Transaction], claimable_outpoints: Vec<ClaimRequest>, height: u32, broadcaster: B, fee_estimator: F, logger: L)
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator,
                                        L::Target: Logger,
@@ -829,7 +829,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                }
        }
 
-       pub(super) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
+       pub(crate) fn block_disconnected<B: Deref, F: Deref, L: Deref>(&mut self, height: u32, broadcaster: B, fee_estimator: F, logger: L)
                where B::Target: BroadcasterInterface,
                      F::Target: FeeEstimator,
                                        L::Target: Logger,
@@ -877,7 +877,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                }
        }
 
-       pub(super) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
+       pub(crate) fn provide_latest_holder_tx(&mut self, tx: HolderCommitmentTransaction) {
                self.prev_holder_commitment = self.holder_commitment.take();
                self.holder_commitment = Some(tx);
        }
@@ -919,7 +919,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
        // have empty holder commitment transaction if a ChannelMonitor is asked to force-close just after Channel::get_outbound_funding_created,
        // before providing an initial commitment transaction. For an outbound channel, the ChannelMonitor is initialized at Channel::funding_signed; there is nothing
        // to monitor before that.
-       pub(super) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
+       pub(crate) fn get_fully_signed_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
                if let Some(ref mut holder_commitment) = self.holder_commitment {
                        match self.key_storage.sign_holder_commitment(holder_commitment, &self.secp_ctx) {
                                Ok(sig) => Some(holder_commitment.add_holder_sig(funding_redeemscript, sig)),
@@ -931,7 +931,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
        }
 
        #[cfg(any(test, feature="unsafe_revoked_tx_signing"))]
-       pub(super) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
+       pub(crate) fn get_fully_signed_copy_holder_tx(&mut self, funding_redeemscript: &Script) -> Option<Transaction> {
                if let Some(ref mut holder_commitment) = self.holder_commitment {
                        let holder_commitment = holder_commitment.clone();
                        match self.key_storage.sign_holder_commitment(&holder_commitment, &self.secp_ctx) {
@@ -943,7 +943,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
                }
        }
 
-       pub(super) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+       pub(crate) fn get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
                let mut htlc_tx = None;
                if self.holder_commitment.is_some() {
                        let commitment_txid = self.holder_commitment.as_ref().unwrap().txid();
@@ -971,7 +971,7 @@ impl<ChanSigner: ChannelKeys> OnchainTxHandler<ChanSigner> {
        }
 
        #[cfg(any(test,feature = "unsafe_revoked_tx_signing"))]
-       pub(super) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
+       pub(crate) fn unsafe_get_fully_signed_htlc_tx(&mut self, outp: &::bitcoin::OutPoint, preimage: &Option<PaymentPreimage>) -> Option<Transaction> {
                let latest_had_sigs = self.holder_htlc_sigs.is_some();
                let prev_had_sigs = self.prev_holder_htlc_sigs.is_some();
                let ret = self.get_fully_signed_htlc_tx(outp, preimage);
index 3db4c4d52f16fcedcb2391e9e6270a0005fe1bbf..bf2709cded67badeb799e261e66c324bbf1c3c46 100644 (file)
@@ -11,8 +11,8 @@
 //! These tests work by standing up full nodes and routing payments across the network, checking that the
 //! returned errors decode to the correct thing.
 
+use chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
 use ln::channelmanager::{HTLCForwardInfo, PaymentPreimage, PaymentHash};
-use ln::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
 use ln::onion_utils;
 use routing::router::{Route, get_route};
 use ln::features::InitFeatures;
@@ -23,7 +23,7 @@ use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsPro
 use util::ser::{Writeable, Writer};
 use util::config::UserConfig;
 
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::{Block, BlockHeader};
 use bitcoin::hash_types::BlockHash;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -59,9 +59,12 @@ fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case:
 {
 
        // reset block height
-       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let block = Block {
+               header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+               txdata: vec![],
+       };
        for ix in 0..nodes.len() {
-               nodes[ix].block_notifier.block_connected_checked(&header, 1, &[], &[]);
+               connect_block(&nodes[ix], &block, 1);
        }
 
        macro_rules! expect_event {
@@ -440,9 +443,12 @@ fn test_onion_failure() {
 
        run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, |msg| {
                let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               let block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
 
-               nodes[1].block_notifier.block_connected_checked(&header, height, &[], &[]);
+               connect_block(&nodes[1], &block, height);
        }, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
 
        run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, |_| {}, || {
@@ -451,9 +457,12 @@ fn test_onion_failure() {
 
        run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
                let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
-               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               let block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
 
-               nodes[2].block_notifier.block_connected_checked(&header, height, &[], &[]);
+               connect_block(&nodes[2], &block, height);
        }, || {}, true, Some(17), None);
 
        run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, |_| {}, || {
index cb274b5e903ef6e4cdcc5e96730cebe696d005ef..7f76ea5b3d31c88a8401eac2ff0fdbd722ede5e2 100644 (file)
@@ -9,7 +9,7 @@
 
 //! Further functional tests which test blockchain reorganizations.
 
-use ln::channelmonitor::ANTI_REORG_DELAY;
+use chain::channelmonitor::ANTI_REORG_DELAY;
 use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
@@ -51,8 +51,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        check_added_monitors!(nodes[2], 1);
        get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 
-       let mut headers = Vec::new();
-       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        let claim_txn = if local_commitment {
                // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
                let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
@@ -62,7 +61,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0]);
 
                // Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
-               nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+               connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
                check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -73,7 +72,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);
 
                // Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+               connect_block(&nodes[1], &Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
 
                // ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
                vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
@@ -86,7 +85,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0]);
 
                // Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
                let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_1_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Timeout, ChannelManager: 1 local commitment tx, 1 Offered HTLC-Timeout
                assert_eq!(node_1_commitment_txn[1].output.len(), 2); // to-local and Offered HTLC (to-remote is dust)
@@ -95,39 +94,50 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);
 
                // Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+               connect_block(&nodes[1], &Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
                // ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
                node_2_commitment_txn
        };
        check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
-       headers.push(header.clone());
+       let mut block = Block { header, txdata: vec![] };
+       let mut blocks = Vec::new();
+       blocks.push(block.clone());
        // At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +
        // ANTI_REORG_DELAY - 1 will give us a confirmation count of ANTI_REORG_DELAY - 1.
        for i in CHAN_CONFIRM_DEPTH + 2..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1 {
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[1].block_notifier.block_connected_checked(&header, i, &vec![], &[0; 0]);
-               headers.push(header.clone());
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(&nodes[1], &block, i);
+               blocks.push(block.clone());
        }
        check_added_monitors!(nodes[1], 0);
        assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
 
        if claim {
                // Now reorg back to CHAN_CONFIRM_DEPTH and confirm node 2's broadcasted transactions:
-               for (height, header) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(headers.iter()).rev() {
-                       nodes[1].block_notifier.block_disconnected(&header, height);
+               for (height, block) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(blocks.iter()).rev() {
+                       disconnect_block(&nodes[1], &block.header, height);
                }
 
-               header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[1].block_notifier.block_connected(&Block { header, txdata: claim_txn }, CHAN_CONFIRM_DEPTH + 1);
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: claim_txn,
+               };
+               connect_block(&nodes[1], &block, CHAN_CONFIRM_DEPTH + 1);
 
-               // ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_monitor_events when we
+               // ChannelManager only polls chain::Watch::release_pending_monitor_events when we
                // probe it for events, so we probe non-message events here (which should still end up empty):
                assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
        } else {
                // Confirm the timeout tx and check that we fail the HTLC backwards
-               header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-               nodes[1].block_notifier.block_connected_checked(&header, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY, &vec![], &[0; 0]);
+               block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: block.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![],
+               };
+               connect_block(&nodes[1], &block, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY);
                expect_pending_htlcs_forwardable!(nodes[1]);
        }
 
index 1a4a8c1a637e7a1e0c0df8a0294f0750265f40b8..cafe4fc10a38f5c51259256c3c87fe6e1248a90b 100644 (file)
@@ -16,9 +16,11 @@ use bitcoin::secp256k1;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hashes::Hash;
 use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::transaction::TxOut;
 use bitcoin::blockdata::opcodes;
 
-use chain::chaininterface::{ChainError, ChainWatchInterface};
+use chain;
+use chain::Access;
 use ln::features::{ChannelFeatures, NodeFeatures};
 use ln::msgs::{DecodeError, ErrorAction, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
 use ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, OptionalField};
@@ -51,22 +53,22 @@ pub struct LockedNetworkGraph<'a>(pub RwLockReadGuard<'a, NetworkGraph>);
 /// This network graph is then used for routing payments.
 /// Provides an interface to help with initial routing sync by
 /// serving historical announcements.
-pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: ChainWatchInterface, L::Target: Logger {
+pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: chain::Access, L::Target: Logger {
        secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
        /// Representation of the payment channel network
        pub network_graph: RwLock<NetworkGraph>,
-       chain_monitor: C,
+       chain_access: Option<C>,
        full_syncs_requested: AtomicUsize,
        logger: L,
 }
 
-impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: ChainWatchInterface, L::Target: Logger {
+impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
        /// Creates a new tracker of the actual state of the network of channels and nodes,
        /// assuming a fresh network graph.
        /// Chain monitor is used to make sure announced channels exist on-chain,
        /// channel data is correct, and that the announcement is signed with
        /// channel owners' keys.
-       pub fn new(chain_monitor: C, logger: L) -> Self {
+       pub fn new(chain_access: Option<C>, logger: L) -> Self {
                NetGraphMsgHandler {
                        secp_ctx: Secp256k1::verification_only(),
                        network_graph: RwLock::new(NetworkGraph {
@@ -74,19 +76,19 @@ impl<C: Deref, L: Deref> NetGraphMsgHandler<C, L> where C::Target: ChainWatchInt
                                nodes: BTreeMap::new(),
                        }),
                        full_syncs_requested: AtomicUsize::new(0),
-                       chain_monitor,
+                       chain_access,
                        logger,
                }
        }
 
        /// Creates a new tracker of the actual state of the network of channels and nodes,
        /// assuming an existing Network Graph.
-       pub fn from_net_graph(chain_monitor: C, logger: L, network_graph: NetworkGraph) -> Self {
+       pub fn from_net_graph(chain_access: Option<C>, logger: L, network_graph: NetworkGraph) -> Self {
                NetGraphMsgHandler {
                        secp_ctx: Secp256k1::verification_only(),
                        network_graph: RwLock::new(network_graph),
                        full_syncs_requested: AtomicUsize::new(0),
-                       chain_monitor,
+                       chain_access,
                        logger,
                }
        }
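Callers now opt in to on-chain validation by passing an Option: None skips UTXO checks entirely (announcements are accepted tentatively), while Some(source) makes handle_channel_announcement consult chain::Access::get_utxo. A small usage sketch, assuming the Arc-wrapped test types used later in this diff (with None, the C type parameter still needs to be pinned, e.g. by an annotation):

    // No chain source: channel announcements are not validated against the UTXO set.
    let unchecked: NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> =
        NetGraphMsgHandler::new(None, Arc::clone(&logger));
    // With a chain source: announced channels are checked via get_utxo() before being stored.
    let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
    let checked = NetGraphMsgHandler::new(Some(chain_source.clone()), Arc::clone(&logger));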
@@ -117,7 +119,7 @@ macro_rules! secp_verify_sig {
        };
 }
 
-impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: ChainWatchInterface, L::Target: Logger {
+impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
        fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
                self.network_graph.write().unwrap().update_node_from_announcement(msg, Some(&self.secp_ctx))
        }
@@ -127,29 +129,33 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                        return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
                }
 
-               let utxo_value = match self.chain_monitor.get_chain_utxo(msg.contents.chain_hash, msg.contents.short_channel_id) {
-                       Ok((script_pubkey, value)) => {
-                               let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
-                                                                   .push_slice(&msg.contents.bitcoin_key_1.serialize())
-                                                                   .push_slice(&msg.contents.bitcoin_key_2.serialize())
-                                                                   .push_opcode(opcodes::all::OP_PUSHNUM_2)
-                                                                   .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
-                               if script_pubkey != expected_script {
-                                       return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
-                               }
-                               //TODO: Check if value is worth storing, use it to inform routing, and compare it
-                               //to the new HTLC max field in channel_update
-                               Some(value)
-                       },
-                       Err(ChainError::NotSupported) => {
+               let utxo_value = match &self.chain_access {
+                       &None => {
                                // Tentatively accept, potentially exposing us to DoS attacks
                                None
                        },
-                       Err(ChainError::NotWatched) => {
-                               return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.contents.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError});
-                       },
-                       Err(ChainError::UnknownTx) => {
-                               return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError});
+                       &Some(ref chain_access) => {
+                               match chain_access.get_utxo(&msg.contents.chain_hash, msg.contents.short_channel_id) {
+                                       Ok(TxOut { value, script_pubkey }) => {
+                                               let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
+                                                                                   .push_slice(&msg.contents.bitcoin_key_1.serialize())
+                                                                                   .push_slice(&msg.contents.bitcoin_key_2.serialize())
+                                                                                   .push_opcode(opcodes::all::OP_PUSHNUM_2)
+                                                                                   .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
+                                               if script_pubkey != expected_script {
+                                                       return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
+                                               }
+                                               //TODO: Check if value is worth storing, use it to inform routing, and compare it
+                                               //to the new HTLC max field in channel_update
+                                               Some(value)
+                                       },
+                                       Err(chain::AccessError::UnknownChain) => {
+                                               return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.contents.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError});
+                                       },
+                                       Err(chain::AccessError::UnknownTx) => {
+                                               return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError});
+                                       },
+                               }
                        },
                };
                let result = self.network_graph.write().unwrap().update_channel_from_announcement(msg, utxo_value, Some(&self.secp_ctx));
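For implementors, the UTXO check now goes through the small chain::Access trait rather than ChainWatchInterface: get_utxo takes the chain's genesis hash and the short channel id and returns the funding TxOut, or AccessError::UnknownChain / AccessError::UnknownTx. A hedged sketch of a trivial implementation (HashMapUtxoSource and its fields are invented for illustration; the method shape follows the call sites in this hunk):

    use bitcoin::blockdata::transaction::TxOut;
    use bitcoin::hash_types::BlockHash;
    use std::collections::HashMap;

    struct HashMapUtxoSource {
        genesis_hash: BlockHash,
        utxos: HashMap<u64, TxOut>, // keyed by short channel id
    }

    impl chain::Access for HashMapUtxoSource {
        fn get_utxo(&self, genesis_hash: &BlockHash, short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
            if *genesis_hash != self.genesis_hash {
                return Err(chain::AccessError::UnknownChain);
            }
            self.utxos.get(&short_channel_id).cloned().ok_or(chain::AccessError::UnknownTx)
        }
    }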
@@ -828,7 +834,7 @@ impl NetworkGraph {
 
 #[cfg(test)]
 mod tests {
-       use chain::chaininterface;
+       use chain;
        use ln::features::{ChannelFeatures, NodeFeatures};
        use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
        use ln::msgs::{OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
@@ -843,6 +849,7 @@ mod tests {
        use bitcoin::network::constants::Network;
        use bitcoin::blockdata::constants::genesis_block;
        use bitcoin::blockdata::script::Builder;
+       use bitcoin::blockdata::transaction::TxOut;
        use bitcoin::blockdata::opcodes;
 
        use hex;
@@ -852,11 +859,10 @@ mod tests {
 
        use std::sync::Arc;
 
-       fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<chaininterface::ChainWatchInterfaceUtil>, Arc<test_utils::TestLogger>>) {
+       fn create_net_graph_msg_handler() -> (Secp256k1<All>, NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>) {
                let secp_ctx = Secp256k1::new();
                let logger = Arc::new(test_utils::TestLogger::new());
-               let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet));
-               let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor, Arc::clone(&logger));
+               let net_graph_msg_handler = NetGraphMsgHandler::new(None, Arc::clone(&logger));
                (secp_ctx, net_graph_msg_handler)
        }
 
@@ -981,9 +987,6 @@ mod tests {
        fn handling_channel_announcements() {
                let secp_ctx = Secp256k1::new();
                let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
-               let chain_monitor = Arc::new(test_utils::TestChainWatcher::new());
-               let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor.clone(), Arc::clone(&logger));
-
 
                let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
                let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
@@ -1020,8 +1023,7 @@ mod tests {
                };
 
                // Test the case where UTXO lookups are not supported
-               *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::NotSupported);
-
+               let mut net_graph_msg_handler = NetGraphMsgHandler::new(None, Arc::clone(&logger));
                match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
                        Ok(res) => assert!(res),
                        _ => panic!()
@@ -1035,7 +1037,6 @@ mod tests {
                        }
                }
 
-
                // If we receive announcement for the same channel (with UTXO lookups disabled),
                // drop new one on the floor, since we can't see any changes.
                match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
@@ -1043,9 +1044,10 @@ mod tests {
                        Err(e) => assert_eq!(e.err, "Already have knowledge of channel")
                };
 
-
                // Test the case where the associated transaction is not on-chain (or not confirmed).
-               *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx);
+               let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
+               *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx);
+               net_graph_msg_handler = NetGraphMsgHandler::new(Some(chain_source.clone()), Arc::clone(&logger));
                unsigned_announcement.short_channel_id += 1;
 
                msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
@@ -1062,10 +1064,9 @@ mod tests {
                        Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry")
                };
 
-
                // Now test if the transaction is found in the UTXO set and the script is correct.
                unsigned_announcement.short_channel_id += 1;
-               *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script.clone(), 0));
+               *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script.clone() });
 
                msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                let valid_announcement = ChannelAnnouncement {
@@ -1090,14 +1091,14 @@ mod tests {
 
                // If we receive announcement for the same channel (but TX is not confirmed),
                // drop new one on the floor, since we can't see any changes.
-               *chain_monitor.utxo_ret.lock().unwrap() = Err(chaininterface::ChainError::UnknownTx);
+               *chain_source.utxo_ret.lock().unwrap() = Err(chain::AccessError::UnknownTx);
                match net_graph_msg_handler.handle_channel_announcement(&valid_announcement) {
                        Ok(_) => panic!(),
                        Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry")
                };
 
                // But if it is confirmed, replace the channel
-               *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script, 0));
+               *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey: good_script });
                unsigned_announcement.features = ChannelFeatures::empty();
                msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
                let valid_announcement = ChannelAnnouncement {
@@ -1169,8 +1170,8 @@ mod tests {
        fn handling_channel_update() {
                let secp_ctx = Secp256k1::new();
                let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
-               let chain_monitor = Arc::new(test_utils::TestChainWatcher::new());
-               let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor.clone(), Arc::clone(&logger));
+               let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
+               let net_graph_msg_handler = NetGraphMsgHandler::new(Some(chain_source.clone()), Arc::clone(&logger));
 
                let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
                let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
@@ -1191,7 +1192,7 @@ mod tests {
                           .push_slice(&PublicKey::from_secret_key(&secp_ctx, node_2_btckey).serialize())
                           .push_opcode(opcodes::all::OP_PUSHNUM_2)
                           .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
-                       *chain_monitor.utxo_ret.lock().unwrap() = Ok((good_script.clone(), amount_sats));
+                       *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: amount_sats, script_pubkey: good_script.clone() });
                        let unsigned_announcement = UnsignedChannelAnnouncement {
                                features: ChannelFeatures::empty(),
                                chain_hash,
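
The tests in this file now build NetGraphMsgHandler either with no chain source at all or with a seeded TestChainSource. A minimal sketch of both setups, assuming the same crate-internal test utilities used above (unused bindings are fine in a sketch):

    // Sketch only: mirrors the test setup in this file.
    use std::sync::Arc;
    use bitcoin::blockdata::opcodes;
    use bitcoin::blockdata::script::Builder;
    use bitcoin::blockdata::transaction::TxOut;
    use bitcoin::network::constants::Network;
    use routing::network_graph::NetGraphMsgHandler;
    use util::test_utils;

    let logger = Arc::new(test_utils::TestLogger::new());

    // Without a chain source, channel announcements cannot be checked against the UTXO set.
    let unchecked: NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>> =
        NetGraphMsgHandler::new(None, Arc::clone(&logger));

    // With a TestChainSource, handle_channel_announcement() consults utxo_ret via chain::Access.
    let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
    let script_pubkey = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
    *chain_source.utxo_ret.lock().unwrap() = Ok(TxOut { value: 0, script_pubkey });
    let checked = NetGraphMsgHandler::new(Some(chain_source.clone()), Arc::clone(&logger));
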
diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs
index 1960c7b4e52cde08a5a59c49dd7942d6a11635d0..56beb2ea05f922377b9eba6f855d5577001a7251 100644 (file)
--- a/lightning/src/routing/router.rs
+++ b/lightning/src/routing/router.rs
@@ -410,7 +410,6 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, targ
 
 #[cfg(test)]
 mod tests {
-       use chain::chaininterface;
        use routing::router::{get_route, RouteHint, RoutingFees};
        use routing::network_graph::NetGraphMsgHandler;
        use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
@@ -433,7 +432,7 @@ mod tests {
        use std::sync::Arc;
 
        // Using the same keys for LN and BTC ids
-       fn add_channel(net_graph_msg_handler: &NetGraphMsgHandler<Arc<chaininterface::ChainWatchInterfaceUtil>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey,
+       fn add_channel(net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey,
           node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64) {
                let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
                let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
@@ -463,7 +462,7 @@ mod tests {
                };
        }
 
-       fn update_channel(net_graph_msg_handler: &NetGraphMsgHandler<Arc<chaininterface::ChainWatchInterfaceUtil>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, update: UnsignedChannelUpdate) {
+       fn update_channel(net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, update: UnsignedChannelUpdate) {
                let msghash = hash_to_message!(&Sha256dHash::hash(&update.encode()[..])[..]);
                let valid_channel_update = ChannelUpdate {
                        signature: secp_ctx.sign(&msghash, node_privkey),
@@ -478,7 +477,7 @@ mod tests {
        }
 
 
-       fn add_or_update_node(net_graph_msg_handler: &NetGraphMsgHandler<Arc<chaininterface::ChainWatchInterfaceUtil>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey,
+       fn add_or_update_node(net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey,
           features: NodeFeatures, timestamp: u32) {
                let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
                let unsigned_announcement = UnsignedNodeAnnouncement {
@@ -531,11 +530,10 @@ mod tests {
                }
        }
 
-       fn build_graph() -> (Secp256k1<All>, NetGraphMsgHandler<std::sync::Arc<crate::chain::chaininterface::ChainWatchInterfaceUtil>, std::sync::Arc<crate::util::test_utils::TestLogger>>, std::sync::Arc<test_utils::TestLogger>) {
+       fn build_graph() -> (Secp256k1<All>, NetGraphMsgHandler<std::sync::Arc<crate::util::test_utils::TestChainSource>, std::sync::Arc<crate::util::test_utils::TestLogger>>, std::sync::Arc<test_utils::TestLogger>) {
                let secp_ctx = Secp256k1::new();
                let logger = Arc::new(test_utils::TestLogger::new());
-               let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet));
-               let net_graph_msg_handler = NetGraphMsgHandler::new(chain_monitor, Arc::clone(&logger));
+               let net_graph_msg_handler = NetGraphMsgHandler::new(None, Arc::clone(&logger));
                // Build network from our_id to node7:
                //
                //        -1(1)2-  node0  -1(3)2-
diff --git a/lightning/src/util/errors.rs b/lightning/src/util/errors.rs
index a5403ef56468e64eaefa95cc261aa6fdfbb04d23..a2a45a7b3afc66372ff3e41e6655d9a199b67099 100644 (file)
--- a/lightning/src/util/errors.rs
+++ b/lightning/src/util/errors.rs
@@ -42,7 +42,7 @@ pub enum APIError {
                /// A human-readable error message
                err: String
        },
-       /// An attempt to call add/update_monitor returned an Err (ie you did this!), causing the
+       /// An attempt to call watch/update_channel returned an Err (ie you did this!), causing the
        /// attempted action to fail.
        MonitorUpdateFailed,
 }
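
The reworded doc comment above points callers at chain::Watch::watch_channel/update_channel. A hedged sketch of caller-side handling follows; the Result below stands in for a ChannelManager call that can surface this variant, and the handling is illustrative rather than prescribed:

    use util::errors::APIError;

    // Hypothetical result from an operation that required a monitor update.
    let result: Result<(), APIError> = Err(APIError::MonitorUpdateFailed);
    match result {
        // watch_channel/update_channel returned Err, so the attempted action did
        // not go through; do not assume the state change was applied.
        Err(APIError::MonitorUpdateFailed) => {},
        Err(_) => panic!("unexpected error"),
        Ok(()) => {},
    }
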
diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs
index b42333beae07c378f80948492e6140e3321b5070..c2c5122d06c47e040db0c6da264d24c6616b8bb6 100644 (file)
--- a/lightning/src/util/macro_logger.rs
+++ b/lightning/src/util/macro_logger.rs
@@ -72,7 +72,7 @@ impl<'a, T> std::fmt::Display for DebugFundingInfo<'a, T> {
 }
 macro_rules! log_funding_info {
        ($key_storage: expr) => {
-               ::util::macro_logger::DebugFundingInfo(&$key_storage.funding_info)
+               ::util::macro_logger::DebugFundingInfo($key_storage.get_funding_txo())
        }
 }
 
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index bdd4620cb3f763bbccdf268a6bb1ea84fa784d7e..0370c0e1a402150bbdd749d892e10f895573a1a6 100644 (file)
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -7,27 +7,28 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use chain;
 use chain::chaininterface;
-use chain::chaininterface::{ConfirmationTarget, ChainError, ChainWatchInterface};
+use chain::chaininterface::ConfirmationTarget;
+use chain::chainmonitor;
+use chain::channelmonitor;
+use chain::channelmonitor::MonitorEvent;
 use chain::transaction::OutPoint;
 use chain::keysinterface;
-use ln::channelmonitor;
 use ln::features::{ChannelFeatures, InitFeatures};
 use ln::msgs;
 use ln::msgs::OptionalField;
-use ln::channelmonitor::MonitorEvent;
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::events;
 use util::logger::{Logger, Level, Record};
 use util::ser::{Readable, Writer, Writeable};
 
 use bitcoin::blockdata::constants::genesis_block;
-use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
 use bitcoin::blockdata::script::{Builder, Script};
-use bitcoin::blockdata::block::Block;
 use bitcoin::blockdata::opcodes;
 use bitcoin::network::constants::Network;
-use bitcoin::hash_types::{Txid, BlockHash};
+use bitcoin::hash_types::{BlockHash, Txid};
 
 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, Signature};
 
@@ -37,7 +38,7 @@ use std::time::Duration;
 use std::sync::Mutex;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::{cmp, mem};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
@@ -59,30 +60,30 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
        }
 }
 
-pub struct TestChannelMonitor<'a> {
+pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
        pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
-       pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a ChainWatchInterface>,
+       pub chain_monitor: chainmonitor::ChainMonitor<EnforcingChannelKeys, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
        // If this is set to Some(), after the next return, we'll always return this until update_ret
        // is changed:
        pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
 }
-impl<'a> TestChannelMonitor<'a> {
-       pub fn new(chain_monitor: &'a chaininterface::ChainWatchInterface, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator) -> Self {
+impl<'a> TestChainMonitor<'a> {
+       pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
                        latest_monitor_update_id: Mutex::new(HashMap::new()),
-                       simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
+                       chain_monitor: chainmonitor::ChainMonitor::new(chain_source, broadcaster, logger, fee_estimator),
                        update_ret: Mutex::new(Ok(())),
                        next_update_ret: Mutex::new(None),
                }
        }
 }
-impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> {
+impl<'a> chain::Watch for TestChainMonitor<'a> {
        type Keys = EnforcingChannelKeys;
 
-       fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
@@ -92,7 +93,7 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> {
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
-               assert!(self.simple_monitor.add_monitor(funding_txo, new_monitor).is_ok());
+               assert!(self.chain_monitor.watch_channel(funding_txo, new_monitor).is_ok());
 
                let ret = self.update_ret.lock().unwrap().clone();
                if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
@@ -101,7 +102,7 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> {
                ret
        }
 
-       fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+       fn update_channel(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // Every monitor update should survive roundtrip
                let mut w = TestVecWriter(Vec::new());
                update.write(&mut w).unwrap();
@@ -109,10 +110,10 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> {
                                &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
 
                self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
-               assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok());
+               assert!(self.chain_monitor.update_channel(funding_txo, update).is_ok());
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
-               let monitors = self.simple_monitor.monitors.lock().unwrap();
+               let monitors = self.chain_monitor.monitors.lock().unwrap();
                let monitor = monitors.get(&funding_txo).unwrap();
                w.0.clear();
                monitor.write_for_disk(&mut w).unwrap();
@@ -128,8 +129,8 @@ impl<'a> channelmonitor::ManyChannelMonitor for TestChannelMonitor<'a> {
                ret
        }
 
-       fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
-               return self.simple_monitor.get_and_clear_pending_monitor_events();
+       fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
+               return self.chain_monitor.release_pending_monitor_events();
        }
 }
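
The three methods implemented above are the entire surface ChannelManager needs from its monitoring backend. An outline of chain::Watch as inferred from this impl; the ChannelKeys bound is an assumption here, and the authoritative definition lives in lightning/src/chain/mod.rs:

    use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, MonitorEvent};
    use chain::keysinterface::ChannelKeys;
    use chain::transaction::OutPoint;

    // Outline only, reconstructed from the TestChainMonitor impl above.
    pub trait Watch {
        type Keys: ChannelKeys;

        // Start watching a channel, given its funding outpoint and initial monitor.
        fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<Self::Keys>) -> Result<(), ChannelMonitorUpdateErr>;

        // Apply an incremental update to an already-watched channel.
        fn update_channel(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;

        // Drain any pending MonitorEvents for the ChannelManager to act on.
        fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
    }
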
 
@@ -393,27 +394,41 @@ impl TestKeysInterface {
        }
 }
 
-pub struct TestChainWatcher {
-       pub utxo_ret: Mutex<Result<(Script, u64), ChainError>>,
+pub struct TestChainSource {
+       pub genesis_hash: BlockHash,
+       pub utxo_ret: Mutex<Result<TxOut, chain::AccessError>>,
+       pub watched_txn: Mutex<HashSet<(Txid, Script)>>,
+       pub watched_outputs: Mutex<HashSet<(OutPoint, Script)>>,
 }
 
-impl TestChainWatcher {
-       pub fn new() -> Self {
-               let script = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
-               Self { utxo_ret: Mutex::new(Ok((script, u64::max_value()))) }
+impl TestChainSource {
+       pub fn new(network: Network) -> Self {
+               let script_pubkey = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
+               Self {
+                       genesis_hash: genesis_block(network).block_hash(),
+                       utxo_ret: Mutex::new(Ok(TxOut { value: u64::max_value(), script_pubkey })),
+                       watched_txn: Mutex::new(HashSet::new()),
+                       watched_outputs: Mutex::new(HashSet::new()),
+               }
        }
 }
 
-impl ChainWatchInterface for TestChainWatcher {
-       fn install_watch_tx(&self, _txid: &Txid, _script_pub_key: &Script) { }
-       fn install_watch_outpoint(&self, _outpoint: (Txid, u32), _out_script: &Script) { }
-       fn watch_all_txn(&self) { }
-       fn filter_block<'a>(&self, _block: &'a Block) -> Vec<usize> {
-               Vec::new()
-       }
-       fn reentered(&self) -> usize { 0 }
+impl chain::Access for TestChainSource {
+       fn get_utxo(&self, genesis_hash: &BlockHash, _short_channel_id: u64) -> Result<TxOut, chain::AccessError> {
+               if self.genesis_hash != *genesis_hash {
+                       return Err(chain::AccessError::UnknownChain);
+               }
 
-       fn get_chain_utxo(&self, _genesis_hash: BlockHash, _unspent_tx_output_identifier: u64) -> Result<(Script, u64), ChainError> {
                self.utxo_ret.lock().unwrap().clone()
        }
 }
+
+impl chain::Filter for TestChainSource {
+       fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
+               self.watched_txn.lock().unwrap().insert((*txid, script_pubkey.clone()));
+       }
+
+       fn register_output(&self, outpoint: &OutPoint, script_pubkey: &Script) {
+               self.watched_outputs.lock().unwrap().insert((*outpoint, script_pubkey.clone()));
+       }
+}
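
A short sketch of exercising the chain::Filter impl above from a test: register a transaction of interest and assert the registration was recorded in watched_txn. The placeholder txid and the assertion style are illustrative:

    use bitcoin::blockdata::opcodes;
    use bitcoin::blockdata::script::Builder;
    use bitcoin::hash_types::Txid;
    use bitcoin::hashes::Hash;
    use bitcoin::network::constants::Network;
    use chain::Filter;
    use util::test_utils::TestChainSource;

    let chain_source = TestChainSource::new(Network::Testnet);
    let script_pubkey = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
    let txid = Txid::hash(&[]); // placeholder txid, purely for illustration

    // register_tx should record the pair so tests can assert what was watched.
    chain_source.register_tx(&txid, &script_pubkey);
    assert!(chain_source.watched_txn.lock().unwrap().contains(&(txid, script_pubkey)));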