Merge pull request #507 from moneyball/patch-2
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Sat, 29 Feb 2020 02:59:34 +0000 (02:59 +0000)
committer GitHub <noreply@github.com>
Sat, 29 Feb 2020 02:59:34 +0000 (02:59 +0000)
Add project tracking and conventions we want to adopt

23 files changed:
CONTRIBUTING.md
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
fuzz/src/router.rs
fuzz/travis-fuzz.sh
lightning-net-tokio/src/lib.rs
lightning/src/chain/chaininterface.rs
lightning/src/chain/keysinterface.rs
lightning/src/chain/transaction.rs
lightning/src/ln/chan_utils.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/channelmonitor.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/mod.rs
lightning/src/ln/peer_handler.rs
lightning/src/ln/reorg_tests.rs [new file with mode: 0644]
lightning/src/util/errors.rs
lightning/src/util/events.rs
lightning/src/util/ser.rs
lightning/src/util/test_utils.rs

index 3e77f5d25e4222a60df1609291fd49f9f378d100..7a0c71f407a912aa31ef8a8fdbcae8fef911dc81 100644 (file)
@@ -7,8 +7,8 @@ testing and patches.
 
 Anyone is invited to contribute without regard to technical experience, "expertise", OSS
 experience, age, or other concern. However, the development of cryptocurrencies demands a
-high-level of rigor, adversial thinking, thorough testing and risk-minimization.
-Any bug may cost users real money. That said we deeply welcome people contributing
+high-level of rigor, adversarial thinking, thorough testing and risk-minimization.
+Any bug may cost users real money. That being said, we deeply welcome people contributing
 for the first time to an open source project or pick up Rust while contributing. Don't be shy,
 you'll learn.
 
index cd22dc59f31ac0927ab623d22c3737c6e7fbd2e9..81380f148de02cedba9a5295e443dcc767a4547f 100644 (file)
@@ -73,56 +73,59 @@ impl Writer for VecWriter {
        }
 }
 
-static mut IN_RESTORE: bool = false;
 pub struct TestChannelMonitor {
-       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys>>,
+       pub logger: Arc<dyn Logger>,
+       pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<BroadcasterInterface>, Arc<FeeEstimator>>>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
-       pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
-       pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
-       pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
+       // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
+       // logic will automatically force-close our channels for us (as we don't have an up-to-date
+       // monitor implying we are not able to punish misbehaving counterparties). Because this test
+       // "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
+       // fully-serialized monitor state here, as well as the corresponding update_id.
+       pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
        pub should_update_manager: atomic::AtomicBool,
 }
 impl TestChannelMonitor {
-       pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<dyn chaininterface::BroadcasterInterface>, logger: Arc<dyn Logger>, feeest: Arc<dyn chaininterface::FeeEstimator>) -> Self {
+       pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<dyn chaininterface::BroadcasterInterface>, logger: Arc<dyn Logger>, feeest: Arc<FeeEstimator>) -> Self {
                Self {
-                       simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest)),
+                       simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)),
+                       logger,
                        update_ret: Mutex::new(Ok(())),
-                       latest_good_update: Mutex::new(HashMap::new()),
-                       latest_update_good: Mutex::new(HashMap::new()),
-                       latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
+                       latest_monitors: Mutex::new(HashMap::new()),
                        should_update_manager: atomic::AtomicBool::new(false),
                }
        }
 }
 impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor {
-       fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
-               let ret = self.update_ret.lock().unwrap().clone();
-               if let Ok(()) = ret {
-                       let mut ser = VecWriter(Vec::new());
-                       monitor.write_for_disk(&mut ser).unwrap();
-                       self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
-                       match self.latest_update_good.lock().unwrap().entry(funding_txo) {
-                               hash_map::Entry::Vacant(e) => { e.insert(true); },
-                               hash_map::Entry::Occupied(mut e) => {
-                                       if !e.get() && unsafe { IN_RESTORE } {
-                                               // Technically we can't consider an update to be "good" unless we're doing
-                                               // it in response to a test_restore_channel_monitor as the channel may
-                                               // still be waiting on such a call, so only set us to good if we're in the
-                                               // middle of a restore call.
-                                               e.insert(true);
-                                       }
-                               },
-                       }
-                       self.should_update_manager.store(true, atomic::Ordering::Relaxed);
-               } else {
-                       self.latest_update_good.lock().unwrap().insert(funding_txo, false);
+       fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+               let mut ser = VecWriter(Vec::new());
+               monitor.write_for_disk(&mut ser).unwrap();
+               if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
+                       panic!("Already had monitor pre-add_monitor");
                }
-               assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
-               ret
+               self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+               assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
+               self.update_ret.lock().unwrap().clone()
        }
 
-       fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
-               return self.simple_monitor.fetch_pending_htlc_updated();
+       fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+               let mut map_lock = self.latest_monitors.lock().unwrap();
+               let mut map_entry = match map_lock.entry(funding_txo) {
+                       hash_map::Entry::Occupied(entry) => entry,
+                       hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
+               };
+               let mut deserialized_monitor = <(Sha256d, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
+                       read(&mut Cursor::new(&map_entry.get().1), Arc::clone(&self.logger)).unwrap().1;
+               deserialized_monitor.update_monitor(update.clone()).unwrap();
+               let mut ser = VecWriter(Vec::new());
+               deserialized_monitor.write_for_disk(&mut ser).unwrap();
+               map_entry.insert((update.update_id, ser.0));
+               self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+               self.update_ret.lock().unwrap().clone()
+       }
+
+       fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+               return self.simple_monitor.get_and_clear_pending_htlcs_updated();
        }
 }
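
The comment above is the key design note for this harness: rather than ever letting the ChannelManager force-close a channel on reload, the test monitor always keeps the latest fully-serialized ChannelMonitor per funding outpoint together with its update_id, so channel_monitor_updated() can later be fed the right id. A minimal, editor-added sketch of just that bookkeeping pattern in isolation (hypothetical stand-in types, not the fuzz harness itself):

use std::collections::HashMap;
use std::sync::Mutex;

// Stand-in key for a funding outpoint: (txid shortened to u64, output index).
type FundingOutPoint = (u64, u16);

struct LatestMonitors {
    // funding outpoint -> (latest update_id, latest full serialization)
    monitors: Mutex<HashMap<FundingOutPoint, (u64, Vec<u8>)>>,
}

impl LatestMonitors {
    fn add_monitor(&self, outpoint: FundingOutPoint, update_id: u64, ser: Vec<u8>) {
        // A second add for the same channel would indicate a harness bug.
        let prev = self.monitors.lock().unwrap().insert(outpoint, (update_id, ser));
        assert!(prev.is_none(), "Already had monitor pre-add_monitor");
    }

    fn update_monitor(&self, outpoint: FundingOutPoint, update_id: u64, ser: Vec<u8>) {
        // Updates may only touch channels we already track.
        let prev = self.monitors.lock().unwrap().insert(outpoint, (update_id, ser));
        assert!(prev.is_some(), "Didn't have monitor on update call");
    }

    fn latest_update_id(&self, outpoint: &FundingOutPoint) -> Option<u64> {
        // Used when simulating channel_monitor_updated() calls back into the manager.
        self.monitors.lock().unwrap().get(outpoint).map(|(id, _)| *id)
    }
}

fn main() {
    let store = LatestMonitors { monitors: Mutex::new(HashMap::new()) };
    store.add_monitor((1, 0), 0, vec![0u8; 4]);
    store.update_monitor((1, 0), 1, vec![1u8; 4]);
    assert_eq!(store.latest_update_id(&(1, 0)), Some(1));
}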
 
@@ -192,7 +195,7 @@ pub fn do_test(data: &[u8]) {
                        config.channel_options.fee_proportional_millionths = 0;
                        config.channel_options.announced_channel = true;
                        config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
-                       (Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone() as Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>, broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()),
+                       (Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap()),
                        monitor)
                } }
        }
@@ -210,10 +213,10 @@ pub fn do_test(data: &[u8]) {
                        config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
 
                        let mut monitors = HashMap::new();
-                       let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
-                       for (outpoint, monitor_ser) in old_monitors.drain() {
+                       let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
+                       for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
                                monitors.insert(outpoint, <(Sha256d, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
-                               monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
+                               monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
                        }
                        let mut monitor_refs = HashMap::new();
                        for (outpoint, monitor) in monitors.iter_mut() {
@@ -223,24 +226,14 @@ pub fn do_test(data: &[u8]) {
                        let read_args = ChannelManagerReadArgs {
                                keys_manager,
                                fee_estimator: fee_est.clone(),
-                               monitor: monitor.clone() as Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>,
+                               monitor: monitor.clone(),
                                tx_broadcaster: broadcast.clone(),
                                logger,
                                default_config: config,
                                channel_monitors: &mut monitor_refs,
                        };
 
-                       let res = (<(Sha256d, ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
-                       for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
-                               if !was_good {
-                                       // If the last time we updated a monitor we didn't successfully update (and we
-                                       // have sense updated our serialized copy of the ChannelManager) we may
-                                       // force-close the channel on our counterparty cause we know we're missing
-                                       // something. Thus, we just return here since we can't continue to test.
-                                       return;
-                               }
-                       }
-                       res
+                       (<(Sha256d, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)
                } }
        }
 
@@ -266,6 +259,7 @@ pub fn do_test(data: &[u8]) {
                        };
 
                        $source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::supported(), &accept_channel);
+                       let funding_output;
                        {
                                let events = $source.get_and_clear_pending_events();
                                assert_eq!(events.len(), 1);
@@ -273,7 +267,7 @@ pub fn do_test(data: &[u8]) {
                                        let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
                                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                                        }]};
-                                       let funding_output = OutPoint::new(tx.txid(), 0);
+                                       funding_output = OutPoint::new(tx.txid(), 0);
                                        $source.funding_transaction_generated(&temporary_channel_id, funding_output);
                                        channel_txn.push(tx);
                                } else { panic!("Wrong event type"); }
@@ -303,6 +297,7 @@ pub fn do_test(data: &[u8]) {
                                if let events::Event::FundingBroadcastSafe { .. } = events[0] {
                                } else { panic!("Wrong event type"); }
                        }
+                       funding_output
                } }
        }
 
@@ -359,8 +354,8 @@ pub fn do_test(data: &[u8]) {
 
        let mut nodes = [node_a, node_b, node_c];
 
-       make_channel!(nodes[0], nodes[1], 0);
-       make_channel!(nodes[1], nodes[2], 1);
+       let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
+       let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);
 
        for node in nodes.iter() {
                confirm_txn!(node);
@@ -631,9 +626,26 @@ pub fn do_test(data: &[u8]) {
                        0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
                        0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
                        0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
-                       0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
-                       0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
-                       0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
+                       0x06 => {
+                               if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
+                                       nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+                               }
+                       },
+                       0x07 => {
+                               if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
+                                       nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+                               }
+                       },
+                       0x24 => {
+                               if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
+                                       nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+                               }
+                       },
+                       0x08 => {
+                               if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
+                                       nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+                               }
+                       },
                        0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
                        0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
                        0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
@@ -722,27 +734,19 @@ pub fn do_test(data: &[u8]) {
                                nodes[2] = node_c.clone();
                                monitor_c = new_monitor_c;
                        },
+                       // 0x24 defined above
                        _ => test_return!(),
                }
 
-               if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
-                       node_a_ser.0.clear();
-                       nodes[0].write(&mut node_a_ser).unwrap();
-                       monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
-                       *monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
-               }
-               if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
-                       node_b_ser.0.clear();
-                       nodes[1].write(&mut node_b_ser).unwrap();
-                       monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
-                       *monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
-               }
-               if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
-                       node_c_ser.0.clear();
-                       nodes[2].write(&mut node_c_ser).unwrap();
-                       monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
-                       *monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
-               }
+               node_a_ser.0.clear();
+               nodes[0].write(&mut node_a_ser).unwrap();
+               monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
+               node_b_ser.0.clear();
+               nodes[1].write(&mut node_b_ser).unwrap();
+               monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
+               node_c_ser.0.clear();
+               nodes[2].write(&mut node_c_ser).unwrap();
+               monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
        }
 }
 
index 568f8085131d259282cb164fbb6c7cb0fc882d06..242c5957f943deb42ae13c44a43cfc5af8753015 100644 (file)
@@ -136,9 +136,9 @@ impl<'a> Hash for Peer<'a> {
 }
 
 struct MoneyLossDetector<'a> {
-       manager: Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>>>,
-       monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys>>,
-       handler: PeerManager<Peer<'a>, Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>>>>,
+       manager: Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>>>,
+       monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>,
+       handler: PeerManager<Peer<'a>, Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>>>>,
 
        peers: &'a RefCell<[bool; 256]>,
        funding_txn: Vec<Transaction>,
@@ -150,9 +150,9 @@ struct MoneyLossDetector<'a> {
 }
 impl<'a> MoneyLossDetector<'a> {
        pub fn new(peers: &'a RefCell<[bool; 256]>,
-                  manager: Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>>>,
-                  monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys>>,
-                  handler: PeerManager<Peer<'a>, Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>>>>) -> Self {
+                  manager: Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>>>,
+                  monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>,
+                  handler: PeerManager<Peer<'a>, Arc<ChannelManager<EnforcingChannelKeys, Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<TestBroadcaster>, Arc<FuzzEstimator>>>, Arc<TestBroadcaster>, Arc<KeyProvider>, Arc<FuzzEstimator>>>>) -> Self {
                MoneyLossDetector {
                        manager,
                        monitor,
@@ -217,7 +217,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
                        // Disconnect all peers
                        for (idx, peer) in self.peers.borrow().iter().enumerate() {
                                if *peer {
-                                       self.handler.disconnect_event(&Peer{id: idx as u8, peers_connected: &self.peers});
+                                       self.handler.socket_disconnected(&Peer{id: idx as u8, peers_connected: &self.peers});
                                }
                        }
 
@@ -333,7 +333,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
        config.channel_options.fee_proportional_millionths =  slice_to_be32(get_slice!(4));
        config.channel_options.announced_channel = get_slice!(1)[0] != 0;
        config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
-       let channelmanager = Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone() as Arc<channelmonitor::ManyChannelMonitor<EnforcingChannelKeys>>, broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap());
+       let channelmanager = Arc::new(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap());
        let router = Arc::new(Router::new(PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()), watch.clone(), Arc::clone(&logger)));
 
        let peers = RefCell::new([false; 256]);
@@ -378,7 +378,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
                        2 => {
                                let peer_id = get_slice!(1)[0];
                                if !peers.borrow()[peer_id as usize] { return; }
-                               loss_detector.handler.disconnect_event(&Peer{id: peer_id, peers_connected: &peers});
+                               loss_detector.handler.socket_disconnected(&Peer{id: peer_id, peers_connected: &peers});
                                peers.borrow_mut()[peer_id as usize] = false;
                        },
                        3 => {
index 434e1b657d135de9d49dffdf1a7d83c4be2d2cc5..b0d7b6031486039e98f974065102b1010a5d0677 100644 (file)
@@ -118,7 +118,7 @@ pub fn do_test(data: &[u8]) {
        macro_rules! decode_msg {
                ($MsgType: path, $len: expr) => {{
                        let mut reader = ::std::io::Cursor::new(get_slice!($len));
-                       match <($MsgType)>::read(&mut reader) {
+                       match <$MsgType>::read(&mut reader) {
                                Ok(msg) => msg,
                                Err(e) => match e {
                                        msgs::DecodeError::UnknownVersion => return,
index 5ec431d7ef1818ffccde8700fb23142bb9345a9e..57e326472886e654af93503c88b5611f7a9e2e2a 100755 (executable)
@@ -13,6 +13,7 @@ rm *_target.rs
 popd
 
 cargo install --force honggfuzz
+sed -i 's/lto = true//' Cargo.toml
 HFUZZ_BUILD_ARGS="--features honggfuzz_fuzz" cargo hfuzz build
 for TARGET in src/bin/*.rs; do
        FILENAME=$(basename $TARGET)
index 47e179189789f953f0944b017b0bc3a5ff9faaec..c2bac324bd3c245490797fb5daf02f3554a56859 100644 (file)
@@ -59,7 +59,7 @@ impl Connection {
                                        return future::Either::A(blocker.then(|_| { Ok(()) }));
                                }
                        }
-                       //TODO: There's a race where we don't meet the requirements of disconnect_socket if its
+                       //TODO: There's a race where we don't meet the requirements of socket_disconnected if its
                        //called right here, after we release the us_ref lock in the scope above, but before we
                        //call read_event!
                        match peer_manager.read_event(&mut SocketDescriptor::new(us_ref.clone(), peer_manager.clone()), pending_read) {
@@ -84,7 +84,7 @@ impl Connection {
                        future::Either::B(future::result(Ok(())))
                }).then(move |_| {
                        if us_close_ref.lock().unwrap().need_disconnect {
-                               peer_manager_ref.disconnect_event(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone()));
+                               peer_manager_ref.socket_disconnected(&SocketDescriptor::new(us_close_ref, peer_manager_ref.clone()));
                                println!("Peer disconnected!");
                        } else {
                                println!("We disconnected peer!");
index 7a077e895a615fce737c4aa4db1cd13c0667e092..73aecfe9444892f2a4d3a0cbdfda0b6f35e49ec0 100644 (file)
@@ -19,6 +19,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::collections::HashSet;
 use std::ops::Deref;
 use std::marker::PhantomData;
+use std::ptr;
 
 /// Used to give chain error details upstream
 pub enum ChainError {
@@ -253,11 +254,22 @@ impl<'a, CL: Deref<Target = ChainListener + 'a> + 'a> BlockNotifier<'a, CL> {
        }
 
        /// Register the given listener to receive events.
-       // TODO: unregister
        pub fn register_listener(&self, listener: CL) {
                let mut vec = self.listeners.lock().unwrap();
                vec.push(listener);
        }
+       /// Unregister the given listener to no longer
+       /// receive events.
+       ///
+       /// If the same listener is registered multiple times, unregistering
+       /// will remove ALL occurrences of that listener. Comparison is done using
+       /// the pointer returned by the Deref trait implementation.
+       pub fn unregister_listener(&self, listener: CL) {
+               let mut vec = self.listeners.lock().unwrap();
+               // item is a ref to an abstract thing that dereferences to a ChainListener,
+               // so dereference it twice to get the ChainListener itself
+               vec.retain(|item | !ptr::eq(&(**item), &(*listener)));
+       }
 
        /// Notify listeners that a block was connected given a full, unfiltered block.
        ///
@@ -388,3 +400,80 @@ impl ChainWatchInterfaceUtil {
                watched.does_match_tx(tx)
        }
 }
+
+#[cfg(test)]
+mod tests {
+       use ln::functional_test_utils::{create_chanmon_cfgs, create_node_cfgs};
+       use super::{BlockNotifier, ChainListener};
+       use std::ptr;
+
+       #[test]
+       fn register_listener_test() {
+               let chanmon_cfgs = create_chanmon_cfgs(1);
+               let node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
+               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone());
+               assert_eq!(block_notifier.listeners.lock().unwrap().len(), 0);
+               let listener = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
+               block_notifier.register_listener(listener);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 1);
+               let item = vec.first().clone().unwrap();
+               assert!(ptr::eq(&(**item), &(*listener)));
+       }
+
+       #[test]
+       fn unregister_single_listener_test() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone());
+               let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
+               let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener;
+               block_notifier.register_listener(listener1);
+               block_notifier.register_listener(listener2);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 2);
+               drop(vec);
+               block_notifier.unregister_listener(listener1);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 1);
+               let item = vec.first().clone().unwrap();
+               assert!(ptr::eq(&(**item), &(*listener2)));
+       }
+
+       #[test]
+       fn unregister_single_listener_ref_test() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone());
+               block_notifier.register_listener(&node_cfgs[0].chan_monitor.simple_monitor as &ChainListener);
+               block_notifier.register_listener(&node_cfgs[1].chan_monitor.simple_monitor as &ChainListener);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 2);
+               drop(vec);
+               block_notifier.unregister_listener(&node_cfgs[0].chan_monitor.simple_monitor);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 1);
+               let item = vec.first().clone().unwrap();
+               assert!(ptr::eq(&(**item), &(*&node_cfgs[1].chan_monitor.simple_monitor)));
+       }
+
+       #[test]
+       fn unregister_multiple_of_the_same_listeners_test() {
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let block_notifier = BlockNotifier::new(node_cfgs[0].chain_monitor.clone());
+               let listener1 = &node_cfgs[0].chan_monitor.simple_monitor as &ChainListener;
+               let listener2 = &node_cfgs[1].chan_monitor.simple_monitor as &ChainListener;
+               block_notifier.register_listener(listener1);
+               block_notifier.register_listener(listener1);
+               block_notifier.register_listener(listener2);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 3);
+               drop(vec);
+               block_notifier.unregister_listener(listener1);
+               let vec = block_notifier.listeners.lock().unwrap();
+               assert_eq!(vec.len(), 1);
+               let item = vec.first().clone().unwrap();
+               assert!(ptr::eq(&(**item), &(*listener2)));
+       }
+}
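
unregister_listener above removes a listener by pointer identity (std::ptr::eq on the dereferenced trait object), not by any notion of value equality, which is why registering the same reference twice and then unregistering it removes all occurrences. A standalone, editor-added sketch of what that comparison checks, using a hypothetical Listener trait rather than ChainListener:

use std::ptr;

trait Listener {
    fn notify(&self);
}

struct Node {
    _id: u8,
}
impl Listener for Node {
    fn notify(&self) {}
}

fn main() {
    let a = Node { _id: 0 };
    let b = Node { _id: 0 };
    let a_ref1: &dyn Listener = &a;
    let a_ref2: &dyn Listener = &a;
    let b_ref: &dyn Listener = &b;
    a_ref1.notify();
    // Two references to the same underlying value compare equal by address.
    assert!(ptr::eq(a_ref1, a_ref2));
    // References to distinct values do not, even though the values are
    // indistinguishable field-by-field.
    assert!(!ptr::eq(a_ref1, b_ref));
}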
index 158f71dba28bed6a1b67c2a0a9e8d640c7458a8a..544df015a8f48b404c390a63a909ec7aca3746ed 100644 (file)
@@ -88,6 +88,57 @@ pub enum SpendableOutputDescriptor {
        }
 }
 
+impl Writeable for SpendableOutputDescriptor {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               match self {
+                       &SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+                               0u8.write(writer)?;
+                               outpoint.write(writer)?;
+                               output.write(writer)?;
+                       },
+                       &SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => {
+                               1u8.write(writer)?;
+                               outpoint.write(writer)?;
+                               key.write(writer)?;
+                               witness_script.write(writer)?;
+                               to_self_delay.write(writer)?;
+                               output.write(writer)?;
+                       },
+                       &SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => {
+                               2u8.write(writer)?;
+                               outpoint.write(writer)?;
+                               key.write(writer)?;
+                               output.write(writer)?;
+                       },
+               }
+               Ok(())
+       }
+}
+
+impl<R: ::std::io::Read> Readable<R> for SpendableOutputDescriptor {
+       fn read(reader: &mut R) -> Result<Self, DecodeError> {
+               match Readable::read(reader)? {
+                       0u8 => Ok(SpendableOutputDescriptor::StaticOutput {
+                               outpoint: Readable::read(reader)?,
+                               output: Readable::read(reader)?,
+                       }),
+                       1u8 => Ok(SpendableOutputDescriptor::DynamicOutputP2WSH {
+                               outpoint: Readable::read(reader)?,
+                               key: Readable::read(reader)?,
+                               witness_script: Readable::read(reader)?,
+                               to_self_delay: Readable::read(reader)?,
+                               output: Readable::read(reader)?,
+                       }),
+                       2u8 => Ok(SpendableOutputDescriptor::DynamicOutputP2WPKH {
+                               outpoint: Readable::read(reader)?,
+                               key: Readable::read(reader)?,
+                               output: Readable::read(reader)?,
+                       }),
+                       _ => Err(DecodeError::InvalidValue),
+               }
+       }
+}
+
 /// A trait to describe an object which can get user secrets and key material.
 pub trait KeysInterface: Send + Sync {
        /// A type which implements ChannelKeys which will be returned by get_channel_keys.
@@ -135,7 +186,8 @@ pub trait KeysInterface: Send + Sync {
 /// (TODO: We shouldn't require that, and should have an API to get them at deser time, due mostly
 /// to the possibility of reentrancy issues by calling the user's code during our deserialization
 /// routine).
-/// TODO: remove Clone once we start returning ChannelUpdate objects instead of copying ChannelMonitor
+/// TODO: We should remove Clone by instead requesting a new ChannelKeys copy when we create
+/// ChannelMonitors instead of expecting to clone the one out of the Channel into the monitors.
 pub trait ChannelKeys : Send+Clone {
        /// Gets the private key for the anchor tx
        fn funding_key<'a>(&'a self) -> &'a SecretKey;
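
The Writeable/Readable implementations added above for SpendableOutputDescriptor use a tag-byte encoding: each variant writes a one-byte discriminant followed by its fields in declaration order, and the reader dispatches on that byte, rejecting unknown tags. A self-contained, editor-added sketch of the same pattern (hypothetical Descriptor type, std::io in place of the crate's Writer/Readable traits):

use std::io::{self, Read, Write};

enum Descriptor {
    Static { value: u64 },
    Dynamic { value: u64, delay: u16 },
}

fn write_descriptor<W: Write>(d: &Descriptor, w: &mut W) -> io::Result<()> {
    match d {
        Descriptor::Static { value } => {
            w.write_all(&[0u8])?;                // variant tag
            w.write_all(&value.to_be_bytes())?;  // fields, in order
        },
        Descriptor::Dynamic { value, delay } => {
            w.write_all(&[1u8])?;
            w.write_all(&value.to_be_bytes())?;
            w.write_all(&delay.to_be_bytes())?;
        },
    }
    Ok(())
}

fn read_descriptor<R: Read>(r: &mut R) -> io::Result<Descriptor> {
    let mut tag = [0u8; 1];
    r.read_exact(&mut tag)?;
    match tag[0] {
        0 => {
            let mut v = [0u8; 8];
            r.read_exact(&mut v)?;
            Ok(Descriptor::Static { value: u64::from_be_bytes(v) })
        },
        1 => {
            let mut v = [0u8; 8];
            r.read_exact(&mut v)?;
            let mut d = [0u8; 2];
            r.read_exact(&mut d)?;
            Ok(Descriptor::Dynamic { value: u64::from_be_bytes(v), delay: u16::from_be_bytes(d) })
        },
        // Mirrors DecodeError::InvalidValue for an unrecognized tag.
        _ => Err(io::Error::new(io::ErrorKind::InvalidData, "unknown variant tag")),
    }
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    write_descriptor(&Descriptor::Dynamic { value: 42, delay: 144 }, &mut buf)?;
    match read_descriptor(&mut &buf[..])? {
        Descriptor::Dynamic { value, delay } => assert_eq!((value, delay), (42, 144)),
        _ => panic!("wrong variant"),
    }
    Ok(())
}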
index ce43984ebd48b270f0f32da2266da2ac940e2a6b..0f479ff91abdff8c12aabb277d149f9b1646a5b1 100644 (file)
@@ -39,6 +39,8 @@ impl OutPoint {
        }
 }
 
+impl_writeable!(OutPoint, 0, { txid, index });
+
 #[cfg(test)]
 mod tests {
        use chain::transaction::OutPoint;
index e7bea90914ebf3fc834f909a84dc7c9bee84217f..3fd489fa1a94348b1c9bc9d0faac1a6dd1ddff88 100644 (file)
@@ -17,6 +17,7 @@ use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use ln::channelmanager::{PaymentHash, PaymentPreimage};
 use ln::msgs::DecodeError;
 use util::ser::{Readable, Writeable, Writer, WriterWriteAdaptor};
+use util::byte_utils;
 
 use secp256k1::key::{SecretKey, PublicKey};
 use secp256k1::{Secp256k1, Signature};
@@ -59,6 +60,114 @@ pub(super) fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [
        res
 }
 
+/// Implements the per-commitment secret storage scheme from
+/// [BOLT 3](https://github.com/lightningnetwork/lightning-rfc/blob/dcbf8583976df087c79c3ce0b535311212e6812d/03-transactions.md#efficient-per-commitment-secret-storage).
+///
+/// Allows us to keep track of all of the revocation secrets of counterparties in just 50*32 bytes
+/// or so.
+#[derive(Clone)]
+pub(super) struct CounterpartyCommitmentSecrets {
+       old_secrets: [([u8; 32], u64); 49],
+}
+
+impl PartialEq for CounterpartyCommitmentSecrets {
+       fn eq(&self, other: &Self) -> bool {
+               for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
+                       if secret != o_secret || idx != o_idx {
+                               return false
+                       }
+               }
+               true
+       }
+}
+
+impl CounterpartyCommitmentSecrets {
+       pub(super) fn new() -> Self {
+               Self { old_secrets: [([0; 32], 1 << 48); 49], }
+       }
+
+       #[inline]
+       fn place_secret(idx: u64) -> u8 {
+               for i in 0..48 {
+                       if idx & (1 << i) == (1 << i) {
+                               return i
+                       }
+               }
+               48
+       }
+
+       pub(super) fn get_min_seen_secret(&self) -> u64 {
+               //TODO This can be optimized?
+               let mut min = 1 << 48;
+               for &(_, idx) in self.old_secrets.iter() {
+                       if idx < min {
+                               min = idx;
+                       }
+               }
+               min
+       }
+
+       #[inline]
+       pub(super) fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
+               let mut res: [u8; 32] = secret;
+               for i in 0..bits {
+                       let bitpos = bits - 1 - i;
+                       if idx & (1 << bitpos) == (1 << bitpos) {
+                               res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
+                               res = Sha256::hash(&res).into_inner();
+                       }
+               }
+               res
+       }
+
+       pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), ()> {
+               let pos = Self::place_secret(idx);
+               for i in 0..pos {
+                       let (old_secret, old_idx) = self.old_secrets[i as usize];
+                       if Self::derive_secret(secret, pos, old_idx) != old_secret {
+                               return Err(());
+                       }
+               }
+               if self.get_min_seen_secret() <= idx {
+                       return Ok(());
+               }
+               self.old_secrets[pos as usize] = (secret, idx);
+               Ok(())
+       }
+
+       /// Can only fail if idx is < get_min_seen_secret
+       pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+               for i in 0..self.old_secrets.len() {
+                       if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
+                               return Some(Self::derive_secret(self.old_secrets[i].0, i as u8, idx))
+                       }
+               }
+               assert!(idx < self.get_min_seen_secret());
+               None
+       }
+}
+
+impl Writeable for CounterpartyCommitmentSecrets {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               for &(ref secret, ref idx) in self.old_secrets.iter() {
+                       writer.write_all(secret)?;
+                       writer.write_all(&byte_utils::be64_to_array(*idx))?;
+               }
+               Ok(())
+       }
+}
+impl<R: ::std::io::Read> Readable<R> for CounterpartyCommitmentSecrets {
+       fn read(reader: &mut R) -> Result<Self, DecodeError> {
+               let mut old_secrets = [([0; 32], 1 << 48); 49];
+               for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
+                       *secret = Readable::read(reader)?;
+                       *idx = Readable::read(reader)?;
+               }
+
+               Ok(Self { old_secrets })
+       }
+}
+
 /// Derives a per-commitment-transaction private key (eg an htlc key or payment key) from the base
 /// private key for that type of key and the per_commitment_point (available in TxCreationKeys)
 pub fn derive_private_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, base_secret: &SecretKey) -> Result<SecretKey, secp256k1::Error> {
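
The CounterpartyCommitmentSecrets storage above implements the BOLT 3 scheme of keeping at most one secret per trailing-zero count (49 slots) and re-deriving any previously received secret on demand: derive_secret() walks the low bits of the target index from high to low, and for every set bit it flips that bit in the running secret and hashes. An editor-added, standalone restatement of just that derivation step, using the sha2 crate as a stand-in for bitcoin_hashes:

use sha2::{Digest, Sha256};

/// Derive the secret for `idx` from a stored secret whose index has `bits` trailing
/// zero bits and shares the remaining high bits with `idx`, per BOLT 3's
/// "efficient per-commitment secret storage".
fn derive_secret(stored: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
    let mut res = stored;
    for i in 0..bits {
        let bitpos = bits - 1 - i;
        if idx & (1 << bitpos) != 0 {
            // Flip bit `bitpos` of the running secret, then apply a single SHA256.
            res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
            let mut out = [0u8; 32];
            out.copy_from_slice(&Sha256::digest(&res));
            res = out;
        }
    }
    res
}

fn main() {
    // Older, already-revealed secrets (higher indices sharing the stored prefix) can
    // be reproduced from one stored secret; here we just show the call shape.
    let stored = [0x42u8; 32];
    let derived = derive_secret(stored, 48, 0x2a);
    assert_ne!(derived, stored);
}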
@@ -137,7 +246,7 @@ pub(super) fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx:
 
 /// The set of public keys which are used in the creation of one commitment transaction.
 /// These are derived from the channel base keys and per-commitment data.
-#[derive(PartialEq)]
+#[derive(PartialEq, Clone)]
 pub struct TxCreationKeys {
        /// The per-commitment public key which was used to derive the other keys.
        pub per_commitment_point: PublicKey,
@@ -153,6 +262,8 @@ pub struct TxCreationKeys {
        /// B's Payment Key
        pub(crate) b_payment_key: PublicKey,
 }
+impl_writeable!(TxCreationKeys, 33*6,
+       { per_commitment_point, revocation_key, a_htlc_key, b_htlc_key, a_delayed_payment_key, b_payment_key });
 
 /// One counterparty's public keys which do not change over the life of a channel.
 #[derive(Clone, PartialEq)]
@@ -235,6 +346,14 @@ pub struct HTLCOutputInCommitment {
        pub transaction_output_index: Option<u32>,
 }
 
+impl_writeable!(HTLCOutputInCommitment, 1 + 8 + 4 + 32 + 5, {
+       offered,
+       amount_msat,
+       cltv_expiry,
+       payment_hash,
+       transaction_output_index
+});
+
 #[inline]
 pub(super) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a_htlc_key: &PublicKey, b_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script {
        let payment_hash160 = Ripemd160::hash(&htlc.payment_hash.0[..]).into_inner();
@@ -505,3 +624,354 @@ impl<R: ::std::io::Read> Readable<R> for LocalCommitmentTransaction {
                Ok(Self { tx })
        }
 }
+
+#[cfg(test)]
+mod tests {
+       use super::CounterpartyCommitmentSecrets;
+       use hex;
+
+       #[test]
+       fn test_per_commitment_storage() {
+               // Test vectors from BOLT 3:
+               let mut secrets: Vec<[u8; 32]> = Vec::new();
+               let mut monitor;
+
+               macro_rules! test_secrets {
+                       () => {
+                               let mut idx = 281474976710655;
+                               for secret in secrets.iter() {
+                                       assert_eq!(monitor.get_secret(idx).unwrap(), *secret);
+                                       idx -= 1;
+                               }
+                               assert_eq!(monitor.get_min_seen_secret(), idx + 1);
+                               assert!(monitor.get_secret(idx).is_none());
+                       };
+               }
+
+               {
+                       // insert_secret correct sequence
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+                       monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+               }
+
+               {
+                       // insert_secret #1 incorrect
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       assert!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #2 incorrect (#1 derived from incorrect)
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #3 incorrect
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #4 incorrect (1,2,3 derived from incorrect)
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+                       assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #5 incorrect
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+                       assert!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #6 incorrect (5 derived from incorrect)
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap());
+                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+                       assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #7 incorrect
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap());
+                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+                       assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+               }
+
+               {
+                       // insert_secret #8 incorrect
+                       monitor = CounterpartyCommitmentSecrets::new();
+                       secrets.clear();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+                       test_secrets!();
+
+                       secrets.push([0; 32]);
+                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap());
+                       assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+               }
+       }
+}
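For reference, the "derived from incorrect" cases above only make sense against the BOLT 3 per-commitment
secret scheme: every lower-index secret must be derivable from an already-stored higher-level secret by
flipping the set bits of the index and hashing, which is what provide_secret() re-checks before accepting
a new entry. A minimal sketch of that derivation, using the external sha2 crate purely for illustration
(the crate itself performs the equivalent derivation with bitcoin_hashes):

    use sha2::{Digest, Sha256};

    /// Illustrative BOLT 3 derivation: walk bits 47..0 of `idx`; for every set bit,
    /// flip that bit of the running 256-bit value and SHA256 it.
    fn derive_commitment_secret(seed: [u8; 32], idx: u64) -> [u8; 32] {
        let mut res = seed;
        for i in 0..48usize {
            let bitpos = 47 - i;
            if idx & (1u64 << bitpos) != 0 {
                // Flip bit `bitpos`, then hash the result.
                res[bitpos / 8] ^= 1 << (bitpos & 7);
                let digest = Sha256::digest(&res);
                res.copy_from_slice(digest.as_slice());
            }
        }
        res
    }

A chain containing a secret that was not produced this way cannot be re-derived consistently, so a later
provide_secret() call returns Err - which is exactly what each block above asserts.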
index 5772d015b5680fc7bd1b25cd13c6464356ab944d..562b21524fbd1a62f547109dd6ff9377c1f3c10f 100644 (file)
@@ -3,6 +3,7 @@
 //! There are a bunch of these as their handling is relatively error-prone so they are split out
 //! here. See also the chanmon_fail_consistency fuzz test.
 
+use chain::transaction::OutPoint;
 use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
 use ln::channelmonitor::ChannelMonitorUpdateErr;
 use ln::features::InitFeatures;
@@ -19,7 +20,8 @@ use ln::functional_test_utils::*;
 #[test]
 fn test_simple_monitor_permanent_update_fail() {
        // Test that we handle a simple permanent monitor update failure
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -51,10 +53,11 @@ fn test_simple_monitor_permanent_update_fail() {
 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Test that we can recover from a simple temporary monitor update failure optionally with
        // a disconnect in between
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
@@ -74,8 +77,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        }
 
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[0].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[0], 1);
+       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[0], 0);
 
        let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
@@ -114,10 +118,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
                reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
        }
 
-       // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
-       *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
-       nodes[0].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[0], 1);
+       // ...and make sure we can force-close a frozen channel
+       nodes[0].node.force_close_channel(&channel_id);
+       check_added_monitors!(nodes[0], 0);
        check_closed_broadcast!(nodes[0], false);
 
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
@@ -152,10 +155,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        // * We then walk through more message exchanges to get the original update_add_htlc
        //   through, swapping message ordering based on disconnect_count & 8 and optionally
        //   disconnect/reconnecting based on disconnect_count.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
@@ -198,6 +202,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
                                }
 
                                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+                               check_added_monitors!(nodes[0], 1);
                                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                                nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
                        }
@@ -214,8 +219,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 
        // Now fix monitor updating...
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[0].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[0], 1);
+       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[0], 0);
 
        macro_rules! disconnect_reconnect_peers { () => { {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
@@ -480,10 +486,11 @@ fn test_monitor_temporary_update_fail_c() {
 #[test]
 fn test_monitor_update_fail_cs() {
        // Tests handling of a monitor update failure when processing an incoming commitment_signed
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -501,8 +508,9 @@ fn test_monitor_update_fail_cs() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
        let responses = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(responses.len(), 2);
 
@@ -534,8 +542,9 @@ fn test_monitor_update_fail_cs() {
        }
 
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[0].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[0], 1);
+       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[0], 0);
 
        let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
@@ -559,12 +568,13 @@ fn test_monitor_update_fail_cs() {
 #[test]
 fn test_monitor_update_fail_no_rebroadcast() {
        // Tests handling of a monitor update failure when no message rebroadcasting on
-       // test_restore_channel_monitor() is required. Backported from
-       // chanmon_fail_consistency fuzz tests.
-       let node_cfgs = create_node_cfgs(2);
+       // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
+       // fuzz tests.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
@@ -584,9 +594,10 @@ fn test_monitor_update_fail_no_rebroadcast() {
        check_added_monitors!(nodes[1], 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       check_added_monitors!(nodes[1], 1);
+       check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);
 
        let events = nodes[1].node.get_and_clear_pending_events();
@@ -605,10 +616,11 @@ fn test_monitor_update_fail_no_rebroadcast() {
 fn test_monitor_update_raa_while_paused() {
        // Tests handling of an RAA while monitor updating has already been marked failed.
        // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
 
@@ -642,8 +654,9 @@ fn test_monitor_update_raa_while_paused() {
        check_added_monitors!(nodes[0], 1);
 
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[0].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[0], 1);
+       let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[0], 0);
 
        let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
@@ -674,7 +687,8 @@ fn test_monitor_update_raa_while_paused() {
 
 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        // Tests handling of a monitor update failure when processing an incoming RAA
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -784,6 +798,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
                send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
                nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
                nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
+               check_added_monitors!(nodes[1], 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
                nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -794,8 +809,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
        // Restore monitor updating, ensuring we immediately get a fail-back update and a
        // update_add update.
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
 
@@ -929,10 +945,11 @@ fn test_monitor_update_fail_reestablish() {
        // Simple test for message retransmission after monitor update failure on
        // channel_reestablish generating a monitor update (which comes from freeing holding cell
        // HTLCs).
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
        create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
 
        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
@@ -983,8 +1000,9 @@ fn test_monitor_update_fail_reestablish() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -1009,10 +1027,11 @@ fn raa_no_response_awaiting_raa_state() {
        // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
        // in question (assuming it intends to respond with a CS after monitor updating is restored).
        // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
        let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
@@ -1063,9 +1082,10 @@ fn raa_no_response_awaiting_raa_state() {
        check_added_monitors!(nodes[1], 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
        // nodes[1] should be AwaitingRAA here!
-       check_added_monitors!(nodes[1], 1);
+       check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, 1000000);
@@ -1124,10 +1144,11 @@ fn claim_while_disconnected_monitor_update_fail() {
        // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
        // code introduced a regression in this test (specifically, this caught a removal of the
        // channel_reestablish handling ensuring the order was sensical given the messages used).
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        // Forward a payment for B to claim
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -1167,16 +1188,18 @@ fn claim_while_disconnected_monitor_update_fail() {
        let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
+       check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
        // Note that nodes[1] not updating monitor here is OK - it won't take action on the new HTLC
-       // until we've test_restore_channel_monitor'd and updated for the new commitment transaction.
+       // until we've called channel_monitor_updated and updated for the new commitment transaction.
 
        // Now un-fail the monitor, which will result in B sending its original commitment update,
        // receiving the commitment update from A, and the resulting commitment dances.
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_msgs.len(), 2);
@@ -1241,10 +1264,11 @@ fn monitor_failed_no_reestablish_response() {
        // response to a commitment_signed.
        // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
        // debug_assert!() failure in channel_reestablish handling.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        // Route the payment and deliver the initial commitment_signed (with a monitor update failure
        // on receipt).
@@ -1278,8 +1302,9 @@ fn monitor_failed_no_reestablish_response() {
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
        let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
@@ -1309,10 +1334,11 @@ fn first_message_on_recv_ordering() {
        // have no pending response but will want to send a RAA/CS (with the updates for the second
        // payment applied).
        // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        // Route the first payment outbound, holding the last RAA for B until we are set up so that we
        // can deliver it and fail the monitor update.
@@ -1358,16 +1384,18 @@ fn first_message_on_recv_ordering() {
        check_added_monitors!(nodes[1], 1);
 
        // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
-       // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
-       // the appropriate HTLC acceptance).
+       // RAA/CS response, which should be generated when we call channel_monitor_updated (with the
+       // appropriate HTLC acceptance).
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+       check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_received!(nodes[1], payment_hash_1, 1000000);
@@ -1396,7 +1424,8 @@ fn test_monitor_update_fail_claim() {
        // update to claim the payment. We then send a payment C->B->A, making the forward of this
        // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
        // updating and claim the payment on B.
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -1417,7 +1446,7 @@ fn test_monitor_update_fail_claim() {
        check_added_monitors!(nodes[2], 1);
 
        // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
-       // paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+       // paused, so forward shouldn't succeed until we call channel_monitor_updated().
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
 
        let mut events = nodes[2].node.get_and_clear_pending_msg_events();
@@ -1451,8 +1480,9 @@ fn test_monitor_update_fail_claim() {
        } else { panic!("Unexpected event!"); }
 
        // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
@@ -1471,10 +1501,11 @@ fn test_monitor_update_on_pending_forwards() {
        // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
        // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
        // from C to A will be pending a forward to A.
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
        create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
 
        // Rebalance a bit so that we can send backwards from 3 to 1.
@@ -1508,8 +1539,9 @@ fn test_monitor_update_on_pending_forwards() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
@@ -1538,10 +1570,11 @@ fn monitor_update_claim_fail_no_response() {
        // to channel being AwaitingRAA).
        // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
        // code was broken.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
 
        // Forward a payment for B to claim
        let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -1566,8 +1599,9 @@ fn monitor_update_claim_fail_no_response() {
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
@@ -1599,7 +1633,8 @@ fn monitor_update_claim_fail_no_response() {
 fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
        // Test that if the monitor update generated by funding_transaction_generated fails we continue
        // the channel setup happily after the update is restored.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -1616,14 +1651,17 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
        check_added_monitors!(nodes[0], 1);
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
-       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+       let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+       let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+       nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
 
        if restore_between_fails {
                assert!(fail_on_generate);
                *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-               nodes[0].node.test_restore_channel_monitor();
-               check_added_monitors!(nodes[0], 1);
+               let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+               nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+               check_added_monitors!(nodes[0], 0);
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        }
@@ -1639,18 +1677,20 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                if fail_on_generate && !restore_between_fails {
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1);
-                       check_added_monitors!(nodes[0], 0);
+                       check_added_monitors!(nodes[0], 1);
                } else {
                        nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
                        check_added_monitors!(nodes[0], 1);
                }
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
-               nodes[0].node.test_restore_channel_monitor();
+               let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+               nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+               check_added_monitors!(nodes[0], 0);
+       } else {
+               check_added_monitors!(nodes[0], 1);
        }
 
-       check_added_monitors!(nodes[0], 1);
-
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
@@ -1684,8 +1724,9 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
        }
 
        *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
-       nodes[1].node.test_restore_channel_monitor();
-       check_added_monitors!(nodes[1], 1);
+       let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+       nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+       check_added_monitors!(nodes[1], 0);
 
        let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
                nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
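Every call site in this file now performs the same three-step restore dance: clear the forced update_ret
error, look up the channel's latest monitor update id, then hand that id back via channel_monitor_updated().
Purely as an illustration of the pattern (this macro is not part of the change; the field and method names
are the ones introduced above), the repetition could be collapsed into a test helper along these lines:

    // Hypothetical consolidation of the restore pattern repeated throughout these tests.
    macro_rules! restore_monitor {
        ($node: expr, $channel_id: expr) => {{
            *$node.chan_monitor.update_ret.lock().unwrap() = Ok(());
            let (outpoint, latest_update) = $node.chan_monitor.latest_monitor_update_id
                .lock().unwrap().get(&$channel_id).unwrap().clone();
            $node.node.channel_monitor_updated(&outpoint, latest_update);
            // Unlike the old test_restore_channel_monitor(), this does not itself persist a
            // new monitor, hence the expectation of 0 added monitors at each call site.
            check_added_monitors!($node, 0);
        }}
    }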
index 317419702095cf7a8adc3e11415b2954b3d522bd..2e4f49ce8371991630e8e5c470b6a625ee821a1a 100644 (file)
@@ -18,9 +18,9 @@ use secp256k1;
 use ln::features::{ChannelFeatures, InitFeatures};
 use ln::msgs;
 use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
-use ln::channelmonitor::ChannelMonitor;
-use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
-use ln::chan_utils::{LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep};
+use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys};
 use ln::chan_utils;
 use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
 use chain::transaction::OutPoint;
@@ -35,6 +35,7 @@ use std;
 use std::default::Default;
 use std::{cmp,mem,fmt};
 use std::sync::{Arc};
+use std::ops::Deref;
 
 #[cfg(test)]
 pub struct ChannelValueStat {
@@ -240,11 +241,14 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
        secp_ctx: Secp256k1<secp256k1::All>,
        channel_value_satoshis: u64,
 
+       latest_monitor_update_id: u64,
+
        #[cfg(not(test))]
        local_keys: ChanSigner,
        #[cfg(test)]
        pub(super) local_keys: ChanSigner,
        shutdown_pubkey: PublicKey,
+       destination_script: Script,
 
        // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
        // generation start at 0 and count up...this simplifies some parts of implementation at the
@@ -269,7 +273,7 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
        monitor_pending_funding_locked: bool,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
-       monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
+       monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 
        // pending_update_fee is filled when sending and receiving update_fee
@@ -303,6 +307,8 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
 
        last_sent_closing_fee: Option<(u64, u64, Signature)>, // (feerate, fee, our_sig)
 
+       funding_txo: Option<OutPoint>,
+
        /// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
        /// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
        /// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
@@ -347,7 +353,10 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
 
        their_shutdown_scriptpubkey: Option<Script>,
 
-       channel_monitor: ChannelMonitor<ChanSigner>,
+       /// Used exclusively to broadcast the latest local state; that it still lives here at all
+       /// is mostly a historical quirk:
+       channel_monitor: Option<ChannelMonitor<ChanSigner>>,
+       commitment_secrets: CounterpartyCommitmentSecrets,
 
        network_sync: UpdateStatus,
 
@@ -359,11 +368,18 @@ pub const OUR_MAX_HTLCS: u16 = 50; //TODO
 /// on ice until the funding transaction gets more confirmations, but the LN protocol doesn't
 /// really allow for this, so instead we're stuck closing it out at that point.
 const UNCONF_THRESHOLD: u32 = 6;
-/// Exposing these two constants for use in test in ChannelMonitor
-pub const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
-pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
 const SPENDING_INPUT_FOR_A_OUTPUT_WEIGHT: u64 = 79; // prevout: 36, nSequence: 4, script len: 1, witness lengths: (3+1)/4, sig: 73/4, if-selector: 1, redeemScript: (6 ops + 2*33 pubkeys + 1*2 delay)/4
 const B_OUTPUT_PLUS_SPENDING_INPUT_WEIGHT: u64 = 104; // prevout: 40, nSequence: 4, script len: 1, witness lengths: 3/4, sig: 73/4, pubkey: 33/4, output: 31 (TODO: Wrong? Useless?)
+
+#[cfg(not(test))]
+const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
+#[cfg(test)]
+pub const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
+#[cfg(not(test))]
+const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+#[cfg(test)]
+pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+
 /// Maximum `funding_satoshis` value, according to the BOLT #2 specification
 /// it's 2^24.
 pub const MAX_FUNDING_SATOSHIS: u64 = (1 << 24);
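As a quick sanity check on the weight constants above, the BOLT 3 commitment fee is
weight * feerate_per_kw / 1000 (integer division). A worked example, with the HTLC count and
feerate chosen arbitrarily:

    // 5 pending HTLCs at a feerate_per_kw of 253, using the constants defined above.
    let weight = COMMITMENT_TX_BASE_WEIGHT + 5 * COMMITMENT_TX_WEIGHT_PER_HTLC; // 724 + 860 = 1584
    let fee_sat = weight * 253 / 1000;                                          // 400 (rounds down)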
@@ -371,16 +387,16 @@ pub const MAX_FUNDING_SATOSHIS: u64 = (1 << 24);
 /// Used to return a simple Error back to ChannelManager. Will get converted to a
 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
 /// channel_id in ChannelManager.
-pub(super) enum ChannelError<ChanSigner: ChannelKeys> {
+pub(super) enum ChannelError {
        Ignore(&'static str),
        Close(&'static str),
        CloseDelayBroadcast {
                msg: &'static str,
-               update: Option<ChannelMonitor<ChanSigner>>,
+               update: ChannelMonitorUpdate,
        },
 }
 
-impl<ChanSigner: ChannelKeys> fmt::Debug for ChannelError<ChanSigner> {
+impl fmt::Debug for ChannelError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                match self {
                        &ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
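For orientation, a rough caller-side sketch of the de-generified error type. The variant shapes come
from the enum above; the apply_monitor_update closure is a made-up placeholder for whatever the caller
does with the ChannelMonitorUpdate, not an API introduced here:

    // Illustrative only: CloseDelayBroadcast now hands back a ChannelMonitorUpdate that must be
    // applied before the channel is torn down, while our own (possibly stale) local commitment
    // is deliberately not broadcast.
    fn log_channel_error(err: ChannelError, apply_monitor_update: impl FnOnce(ChannelMonitorUpdate)) {
        match err {
            ChannelError::Ignore(msg) => println!("ignore: {}", msg),
            ChannelError::Close(msg) => println!("close: {}", msg),
            ChannelError::CloseDelayBroadcast { msg, update } => {
                apply_monitor_update(update);
                println!("close (delay broadcast): {}", msg);
            },
        }
    }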
@@ -422,7 +438,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        }
 
        // Constructors:
-       pub fn new_outbound(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface<ChanKeySigner = ChanSigner>>, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, APIError> {
+       pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, APIError>
+       where K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+             F::Target: FeeEstimator,
+       {
                let chan_keys = keys_provider.get_channel_keys(false, channel_value_satoshis);
 
                if channel_value_satoshis >= MAX_FUNDING_SATOSHIS {
@@ -444,12 +463,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
 
-               let secp_ctx = Secp256k1::new();
-               let channel_monitor = ChannelMonitor::new(chan_keys.clone(),
-                                                         chan_keys.funding_key(), chan_keys.revocation_base_key(), chan_keys.delayed_payment_base_key(),
-                                                         chan_keys.htlc_base_key(), chan_keys.payment_base_key(), &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
-                                                         keys_provider.get_destination_script(), logger.clone());
-
                Ok(Channel {
                        user_id: user_id,
                        config: config.channel_options.clone(),
@@ -457,11 +470,15 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        channel_id: keys_provider.get_channel_id(),
                        channel_state: ChannelState::OurInitSent as u32,
                        channel_outbound: true,
-                       secp_ctx: secp_ctx,
+                       secp_ctx: Secp256k1::new(),
                        channel_value_satoshis: channel_value_satoshis,
 
+                       latest_monitor_update_id: 0,
+
                        local_keys: chan_keys,
                        shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+                       destination_script: keys_provider.get_destination_script(),
+
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
@@ -490,6 +507,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        last_sent_closing_fee: None,
 
+                       funding_txo: None,
                        funding_tx_confirmed_in: None,
                        short_channel_id: None,
                        last_block_connected: Default::default(),
@@ -515,7 +533,8 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        their_shutdown_scriptpubkey: None,
 
-                       channel_monitor: channel_monitor,
+                       channel_monitor: None,
+                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
 
                        network_sync: UpdateStatus::Fresh,
 
@@ -523,7 +542,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                })
        }
 
-       fn check_remote_fee(fee_estimator: &FeeEstimator, feerate_per_kw: u32) -> Result<(), ChannelError<ChanSigner>> {
+       fn check_remote_fee<F: Deref>(fee_estimator: &F, feerate_per_kw: u32) -> Result<(), ChannelError>
+               where F::Target: FeeEstimator
+       {
                if (feerate_per_kw as u64) < fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) {
                        return Err(ChannelError::Close("Peer's feerate much too low"));
                }
@@ -535,7 +556,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        /// Creates a new channel from a remote side's request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
-       pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface<ChanKeySigner = ChanSigner>>, their_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, ChannelError<ChanSigner>> {
+       pub fn new_from_req<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, their_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, ChannelError>
+               where K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+                     F::Target: FeeEstimator
+       {
                let mut chan_keys = keys_provider.get_channel_keys(true, msg.funding_satoshis);
                let their_pubkeys = ChannelPublicKeys {
                        funding_pubkey: msg.funding_pubkey,
@@ -643,12 +667,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        return Err(ChannelError::Close("Insufficient funding amount for initial commitment"));
                }
 
-               let secp_ctx = Secp256k1::new();
-               let channel_monitor = ChannelMonitor::new(chan_keys.clone(),
-                                                         chan_keys.funding_key(), chan_keys.revocation_base_key(), chan_keys.delayed_payment_base_key(),
-                                                         chan_keys.htlc_base_key(), chan_keys.payment_base_key(), &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
-                                                         keys_provider.get_destination_script(), logger.clone());
-
                let their_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
                        match &msg.shutdown_scriptpubkey {
                                &OptionalField::Present(ref script) => {
@@ -670,17 +688,21 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        }
                } else { None };
 
-               let mut chan = Channel {
+               let chan = Channel {
                        user_id: user_id,
                        config: local_config,
 
                        channel_id: msg.temporary_channel_id,
                        channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
                        channel_outbound: false,
-                       secp_ctx: secp_ctx,
+                       secp_ctx: Secp256k1::new(),
+
+                       latest_monitor_update_id: 0,
 
                        local_keys: chan_keys,
                        shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+                       destination_script: keys_provider.get_destination_script(),
+
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: msg.push_msat,
@@ -709,6 +731,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        last_sent_closing_fee: None,
 
+                       funding_txo: None,
                        funding_tx_confirmed_in: None,
                        short_channel_id: None,
                        last_block_connected: Default::default(),
@@ -735,17 +758,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        their_shutdown_scriptpubkey,
 
-                       channel_monitor: channel_monitor,
+                       channel_monitor: None,
+                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
 
                        network_sync: UpdateStatus::Fresh,
 
                        logger,
                };
 
-               let obscure_factor = chan.get_commitment_transaction_number_obscure_factor();
-               let funding_redeemscript = chan.get_funding_redeemscript();
-               chan.channel_monitor.set_basic_channel_info(&msg.htlc_basepoint, &msg.delayed_payment_basepoint, msg.to_self_delay, funding_redeemscript, msg.funding_satoshis, obscure_factor);
-
                Ok(chan)
        }
 
@@ -804,7 +824,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let txins = {
                        let mut ins: Vec<TxIn> = Vec::new();
                        ins.push(TxIn {
-                               previous_output: self.channel_monitor.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
                                script_sig: Script::new(),
                                sequence: ((0x80 as u32) << 8*3) | ((obscured_commitment_transaction_number >> 3*8) as u32),
                                witness: Vec::new(),
@@ -1023,7 +1043,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let txins = {
                        let mut ins: Vec<TxIn> = Vec::new();
                        ins.push(TxIn {
-                               previous_output: self.channel_monitor.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
                                script_sig: Script::new(),
                                sequence: 0xffffffff,
                                witness: Vec::new(),
@@ -1082,7 +1102,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// our counterparty!)
        /// The result is a transaction which we can revoke ownership of (ie a "local" transaction)
        /// TODO Some magic rust shit to compile-time check this?
-       fn build_local_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError<ChanSigner>> {
+       fn build_local_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError> {
                let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(commitment_number));
                let delayed_payment_base = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.delayed_payment_base_key());
                let htlc_basepoint = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.htlc_base_key());
@@ -1095,7 +1115,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
        /// will sign and send to our counterparty.
        /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
-       fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError<ChanSigner>> {
+       fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError> {
                //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
                //may see payments to it!
                let payment_basepoint = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.payment_base_key());
@@ -1124,7 +1144,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
        /// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
        /// Ok(_) if debug assertions are turned on and preconditions are met.
-       fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitor<ChanSigner>>), ChannelError<ChanSigner>> {
+       fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -1170,7 +1190,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                //
                // We have to put the payment_preimage in the channel_monitor right away here to ensure we
                // can claim it even if the channel hits the chain before we see their next commitment.
-               self.channel_monitor.provide_payment_preimage(&payment_hash_calc, &payment_preimage_arg);
+               self.latest_monitor_update_id += 1;
+               let monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                               payment_preimage: payment_preimage_arg.clone(),
+                       }],
+               };
+               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
 
                if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
                        for pending_update in self.holding_cell_htlc_updates.iter() {
@@ -1185,7 +1212,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                        log_warn!(self, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
-                                                       return Ok((None, Some(self.channel_monitor.clone())));
+                                                       return Ok((None, Some(monitor_update)));
                                                }
                                        },
                                        _ => {}
@@ -1195,7 +1222,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
                                payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
                        });
-                       return Ok((None, Some(self.channel_monitor.clone())));
+                       return Ok((None, Some(monitor_update)));
                }
 
                {
@@ -1203,7 +1230,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        if let InboundHTLCState::Committed = htlc.state {
                        } else {
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                               return Ok((None, Some(self.channel_monitor.clone())));
+                               return Ok((None, Some(monitor_update)));
                        }
                        log_trace!(self, "Upgrading HTLC {} to LocalRemoved with a Fulfill!", log_bytes!(htlc.payment_hash.0));
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
@@ -1213,16 +1240,24 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        channel_id: self.channel_id(),
                        htlc_id: htlc_id_arg,
                        payment_preimage: payment_preimage_arg,
-               }), Some(self.channel_monitor.clone())))
+               }), Some(monitor_update)))
        }
 
-       pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitor<ChanSigner>>), ChannelError<ChanSigner>> {
+       pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> {
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage)? {
-                       (Some(update_fulfill_htlc), _) => {
+                       (Some(update_fulfill_htlc), Some(mut monitor_update)) => {
+                               let (commitment, mut additional_update) = self.send_commitment_no_status_check()?;
+                               // send_commitment_no_status_check may bump latest_monitor_update_id, but update_ids must
+                               // increase strictly by one per update handed to the caller, so wind the counter back here.
+                               self.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
+                               Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+                       },
+                       (Some(update_fulfill_htlc), None) => {
                                let (commitment, monitor_update) = self.send_commitment_no_status_check()?;
                                Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
                        },
-                       (None, Some(channel_monitor)) => Ok((None, Some(channel_monitor))),
+                       (None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
                        (None, None) => Ok((None, None))
                }
        }
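
get_update_fulfill_htlc_and_commit above folds the commitment-signed step into the fulfill update and winds latest_monitor_update_id back so the merged update keeps a single id. A compact sketch of that merge with hypothetical Chan/Update stand-ins:

struct Update { update_id: u64, updates: Vec<&'static str> }

struct Chan { latest_monitor_update_id: u64 }

impl Chan {
    fn next_update(&mut self, step: &'static str) -> Update {
        self.latest_monitor_update_id += 1;
        Update { update_id: self.latest_monitor_update_id, updates: vec![step] }
    }

    fn fulfill_and_commit(&mut self) -> Update {
        let mut monitor_update = self.next_update("payment_preimage");
        let mut additional_update = self.next_update("latest_local_commitment_tx_info");
        // Wind the counter back so the merged update keeps the first id and the next update
        // handed to the caller is exactly one greater.
        self.latest_monitor_update_id = monitor_update.update_id;
        monitor_update.updates.append(&mut additional_update.updates);
        monitor_update
    }
}

fn main() {
    let mut chan = Chan { latest_monitor_update_id: 0 };
    let merged = chan.fulfill_and_commit();
    assert_eq!((merged.update_id, merged.updates.len()), (1, 2));
    assert_eq!(chan.latest_monitor_update_id, 1);
}
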
@@ -1230,7 +1265,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
        /// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
        /// Ok(_) if debug assertions are turned on and preconditions are met.
-       pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError<ChanSigner>> {
+       pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
                }
@@ -1298,7 +1333,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        // Message handlers:
 
-       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError> {
                // Check sanity of message fields:
                if !self.channel_outbound {
                        return Err(ChannelError::Close("Got an accept_channel message from an inbound peer"));
@@ -1400,16 +1435,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                self.their_cur_commitment_point = Some(msg.first_per_commitment_point);
                self.their_shutdown_scriptpubkey = their_shutdown_scriptpubkey;
 
-               let obscure_factor = self.get_commitment_transaction_number_obscure_factor();
-               let funding_redeemscript = self.get_funding_redeemscript();
-               self.channel_monitor.set_basic_channel_info(&msg.htlc_basepoint, &msg.delayed_payment_basepoint, msg.to_self_delay, funding_redeemscript, self.channel_value_satoshis, obscure_factor);
-
                self.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
 
                Ok(())
        }
 
-       fn funding_created_signature(&mut self, sig: &Signature) -> Result<(Transaction, LocalCommitmentTransaction, Signature, TxCreationKeys), ChannelError<ChanSigner>> {
+       fn funding_created_signature(&mut self, sig: &Signature) -> Result<(Transaction, LocalCommitmentTransaction, Signature, TxCreationKeys), ChannelError> {
                let funding_script = self.get_funding_redeemscript();
 
                let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
@@ -1434,7 +1465,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                &self.their_pubkeys.as_ref().expect("their_funding_pubkey() only allowed after accept_channel").funding_pubkey
        }
 
-       pub fn funding_created(&mut self, msg: &msgs::FundingCreated) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       pub fn funding_created(&mut self, msg: &msgs::FundingCreated) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError> {
                if self.channel_outbound {
                        return Err(ChannelError::Close("Received funding_created for an outbound channel?"));
                }
@@ -1444,28 +1475,47 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // channel.
                        return Err(ChannelError::Close("Received funding_created after we got the channel!"));
                }
-               if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+               if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
                                self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
                let funding_txo = OutPoint::new(msg.funding_txid, msg.funding_output_index);
-               let funding_txo_script = self.get_funding_redeemscript().to_v0_p2wsh();
-               self.channel_monitor.set_funding_info((funding_txo, funding_txo_script));
+               self.funding_txo = Some(funding_txo.clone());
 
                let (remote_initial_commitment_tx, local_initial_commitment_tx, our_signature, local_keys) = match self.funding_created_signature(&msg.signature) {
                        Ok(res) => res,
                        Err(e) => {
-                               self.channel_monitor.unset_funding_info();
+                               self.funding_txo = None;
                                return Err(e);
                        }
                };
 
                // Now that we're past error-generating stuff, update our local state:
 
-               self.channel_monitor.provide_latest_remote_commitment_tx_info(&remote_initial_commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
-               self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx, local_keys, self.feerate_per_kw, Vec::new());
+               let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
+               let funding_redeemscript = self.get_funding_redeemscript();
+               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               macro_rules! create_monitor {
+                       () => { {
+                               let mut channel_monitor = ChannelMonitor::new(self.local_keys.clone(),
+                                                                             &self.shutdown_pubkey, self.our_to_self_delay,
+                                                                             &self.destination_script, (funding_txo, funding_txo_script.clone()),
+                                                                             &their_pubkeys.htlc_basepoint, &their_pubkeys.delayed_payment_basepoint,
+                                                                             self.their_to_self_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
+                                                                             self.get_commitment_transaction_number_obscure_factor(),
+                                                                             self.logger.clone());
+
+                               channel_monitor.provide_latest_remote_commitment_tx_info(&remote_initial_commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+                               channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys.clone(), self.feerate_per_kw, Vec::new()).unwrap();
+                               channel_monitor
+                       } }
+               }
+
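+               // Note: create_monitor!() is expanded twice below on purpose: one ChannelMonitor is
+               // kept in self.channel_monitor for this Channel's own use, and an identical copy is
+               // returned from funding_created (see the Ok(..) below) for the caller to persist and
+               // register, rather than cloning the first one.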
+               self.channel_monitor = Some(create_monitor!());
+               let channel_monitor = create_monitor!();
+
                self.channel_state = ChannelState::FundingSent as u32;
                self.channel_id = funding_txo.to_channel_id();
                self.cur_remote_commitment_transaction_number -= 1;
@@ -1474,19 +1524,19 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature: our_signature
-               }, self.channel_monitor.clone()))
+               }, channel_monitor))
        }
 
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed(&mut self, msg: &msgs::FundingSigned) -> Result<ChannelMonitor<ChanSigner>, ChannelError<ChanSigner>> {
+       pub fn funding_signed(&mut self, msg: &msgs::FundingSigned) -> Result<ChannelMonitorUpdate, (Option<ChannelMonitorUpdate>, ChannelError)> {
                if !self.channel_outbound {
-                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?"));
+                       return Err((None, ChannelError::Close("Received funding_signed for an inbound channel?")));
                }
                if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
-                       return Err(ChannelError::Close("Received funding_signed in strange state!"));
+                       return Err((None, ChannelError::Close("Received funding_signed in strange state!")));
                }
-               if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+               if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER - 1 ||
                                self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
@@ -1494,29 +1544,38 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                let funding_script = self.get_funding_redeemscript();
 
-               let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
+               let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number).map_err(|e| (None, e))?;
                let local_initial_commitment_tx = self.build_commitment_transaction(self.cur_local_commitment_transaction_number, &local_keys, true, false, self.feerate_per_kw).0;
                let local_sighash = hash_to_message!(&bip143::SighashComponents::new(&local_initial_commitment_tx).sighash_all(&local_initial_commitment_tx.input[0], &funding_script, self.channel_value_satoshis)[..]);
 
                let their_funding_pubkey = &self.their_pubkeys.as_ref().unwrap().funding_pubkey;
 
                // They sign the "local" commitment transaction, allowing us to broadcast the tx if we wish.
-               secp_check!(self.secp_ctx.verify(&local_sighash, &msg.signature, their_funding_pubkey), "Invalid funding_signed signature from peer");
+               if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, their_funding_pubkey) {
+                       return Err((None, ChannelError::Close("Invalid funding_signed signature from peer")));
+               }
 
-               self.channel_monitor.provide_latest_local_commitment_tx_info(
-                       LocalCommitmentTransaction::new_missing_local_sig(local_initial_commitment_tx, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), their_funding_pubkey),
-                       local_keys, self.feerate_per_kw, Vec::new());
+               self.latest_monitor_update_id += 1;
+               let monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+                               commitment_tx: LocalCommitmentTransaction::new_missing_local_sig(local_initial_commitment_tx, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), their_funding_pubkey),
+                               local_keys, feerate_per_kw: self.feerate_per_kw, htlc_outputs: Vec::new(),
+                       }]
+               };
+               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
                self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32));
                self.cur_local_commitment_transaction_number -= 1;
 
                if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
-                       Ok(self.channel_monitor.clone())
+                       Ok(monitor_update)
                } else {
-                       Err(ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast"))
+                       Err((Some(monitor_update),
+                               ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast")))
                }
        }
 
-       pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish"));
                }
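
funding_signed above now fails with (Option<ChannelMonitorUpdate>, ChannelError) rather than a bare ChannelError: on the Ignore path a monitor update has already been produced and applied in-memory, so the caller presumably still needs to persist it even though funding can't be broadcast yet. A hedged sketch of one way a caller could handle that shape (stand-in types; the real handling lives in ChannelManager and may differ):

enum ChanErr { Ignore(&'static str), Close(&'static str) }
struct MonUpdate { update_id: u64 }

fn handle_funding_signed(res: Result<MonUpdate, (Option<MonUpdate>, ChanErr)>,
                         persist: &mut Vec<u64>) -> Result<(), &'static str> {
    match res {
        Ok(update) => { persist.push(update.update_id); Ok(()) },
        Err((Some(update), ChanErr::Ignore(_))) => {
            // The channel can't move forward yet, but the update must still reach the persister.
            persist.push(update.update_id);
            Ok(())
        },
        Err((_, ChanErr::Ignore(msg))) => Err(msg),
        Err((_, ChanErr::Close(msg))) => Err(msg),
    }
}

fn main() {
    let mut persisted = Vec::new();
    handle_funding_signed(Ok(MonUpdate { update_id: 7 }), &mut persisted).unwrap();
    handle_funding_signed(
        Err((Some(MonUpdate { update_id: 8 }), ChanErr::Ignore("previous monitor update failed"))),
        &mut persisted).unwrap();
    assert_eq!(persisted, vec![7, 8]);
}
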
@@ -1587,7 +1646,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                cmp::min(self.value_to_self_msat as i64 - self.get_outbound_pending_htlc_stats().1 as i64, 0) as u64)
        }
 
-       pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state"));
                }
@@ -1661,7 +1720,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
        #[inline]
-       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError<ChanSigner>> {
+       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
                for htlc in self.pending_outbound_htlcs.iter_mut() {
                        if htlc.htlc_id == htlc_id {
                                match check_preimage {
@@ -1686,7 +1745,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find"))
        }
 
-       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError<ChanSigner>> {
+       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state"));
                }
@@ -1698,7 +1757,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
        }
 
-       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state"));
                }
@@ -1710,7 +1769,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                Ok(())
        }
 
-       pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state"));
                }
@@ -1722,20 +1781,20 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                Ok(())
        }
 
-       pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &FeeEstimator) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       pub fn commitment_signed<F: Deref>(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &F) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)> where F::Target: FeeEstimator {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state"));
+                       return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state")));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish"));
+                       return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish")));
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds"));
+                       return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds")));
                }
 
                let funding_script = self.get_funding_redeemscript();
 
-               let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
+               let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number).map_err(|e| (None, e))?;
 
                let mut update_fee = false;
                let feerate_per_kw = if !self.channel_outbound && self.pending_update_fee.is_some() {
@@ -1753,7 +1812,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let local_commitment_txid = local_commitment_tx.0.txid();
                let local_sighash = hash_to_message!(&bip143::SighashComponents::new(&local_commitment_tx.0).sighash_all(&local_commitment_tx.0.input[0], &funding_script, self.channel_value_satoshis)[..]);
                log_trace!(self, "Checking commitment tx signature {} by key {} against tx {} with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.their_funding_pubkey().serialize()), encode::serialize_hex(&local_commitment_tx.0), encode::serialize_hex(&funding_script));
-               secp_check!(self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey()), "Invalid commitment tx signature from peer");
+               if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey()) {
+                       return Err((None, ChannelError::Close("Invalid commitment tx signature from peer")));
+               }
 
                //If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction
                if update_fee {
@@ -1761,12 +1822,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        let total_fee: u64 = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
 
                        if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + self.their_channel_reserve_satoshis {
-                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee"));
+                               return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee")));
                        }
                }
 
                if msg.htlc_signatures.len() != local_commitment_tx.1 {
-                       return Err(ChannelError::Close("Got wrong number of HTLC signatures from remote"));
+                       return Err((None, ChannelError::Close("Got wrong number of HTLC signatures from remote")));
                }
 
                let mut htlcs_and_sigs = Vec::with_capacity(local_commitment_tx.2.len());
@@ -1776,7 +1837,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &local_keys);
                                log_trace!(self, "Checking HTLC tx signature {} by key {} against tx {} with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(local_keys.b_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), encode::serialize_hex(&htlc_redeemscript));
                                let htlc_sighash = hash_to_message!(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]);
-                               secp_check!(self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key), "Invalid HTLC tx signature from peer");
+                               if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key) {
+                                       return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer")));
+                               }
                                htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
                        } else {
                                htlcs_and_sigs.push((htlc, None, source));
@@ -1803,9 +1866,15 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                let their_funding_pubkey = self.their_pubkeys.as_ref().unwrap().funding_pubkey;
 
-               self.channel_monitor.provide_latest_local_commitment_tx_info(
-                       LocalCommitmentTransaction::new_missing_local_sig(local_commitment_tx.0, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), &their_funding_pubkey),
-                       local_keys, self.feerate_per_kw, htlcs_and_sigs);
+               self.latest_monitor_update_id += 1;
+               let mut monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+                               commitment_tx: LocalCommitmentTransaction::new_missing_local_sig(local_commitment_tx.0, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), &their_funding_pubkey),
+                               local_keys, feerate_per_kw: self.feerate_per_kw, htlc_outputs: htlcs_and_sigs
+                       }]
+               };
+               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
 
                for htlc in self.pending_inbound_htlcs.iter_mut() {
                        let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
@@ -1838,26 +1907,31 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
                                // the corresponding HTLC status updates so that get_last_commitment_update
                                // includes the right HTLCs.
-                               // Note that this generates a monitor update that we ignore! This is OK since we
-                               // won't actually send the commitment_signed that generated the update to the other
-                               // side until the latest monitor has been pulled from us and stored.
                                self.monitor_pending_commitment_signed = true;
-                               self.send_commitment_no_status_check()?;
+                               let (_, mut additional_update) = self.send_commitment_no_status_check().map_err(|e| (None, e))?;
+                               // send_commitment_no_status_check may bump latest_monitor_update_id, but update_ids must
+                               // increase strictly by one per update handed to the caller, so wind the counter back here.
+                               self.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
                        }
                        // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
                        // re-send the message on restoration)
-                       return Err(ChannelError::Ignore("Previous monitor update failure prevented generation of RAA"));
+                       return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA")));
                }
 
-               let (our_commitment_signed, monitor_update, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let (our_commitment_signed, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack when we
                        // free_holding_cell_htlcs().
-                       let (msg, monitor) = self.send_commitment_no_status_check()?;
-                       (Some(msg), monitor, None)
+                       let (msg, mut additional_update) = self.send_commitment_no_status_check().map_err(|e| (None, e))?;
+                       // send_commitment_no_status_check may bump latest_monitor_update_id, but update_ids must
+                       // increase strictly by one per update handed to the caller, so wind the counter back here.
+                       self.latest_monitor_update_id = monitor_update.update_id;
+                       monitor_update.updates.append(&mut additional_update.updates);
+                       (Some(msg), None)
                } else if !need_our_commitment {
-                       (None, self.channel_monitor.clone(), self.maybe_propose_first_closing_signed(fee_estimator))
-               } else { (None, self.channel_monitor.clone(), None) };
+                       (None, self.maybe_propose_first_closing_signed(fee_estimator))
+               } else { (None, None) };
 
                Ok((msgs::RevokeAndACK {
                        channel_id: self.channel_id,
@@ -1868,11 +1942,16 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
        /// fulfilling or failing the last pending HTLC)
-       fn free_holding_cell_htlcs(&mut self) -> Result<Option<(msgs::CommitmentUpdate, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+       fn free_holding_cell_htlcs(&mut self) -> Result<Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, ChannelError> {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
                        log_trace!(self, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" });
 
+                       let mut monitor_update = ChannelMonitorUpdate {
+                               update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
+                               updates: Vec::new(),
+                       };
+
                        let mut htlc_updates = Vec::new();
                        mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
                        let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
@@ -1907,7 +1986,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                },
                                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
                                                        match self.get_update_fulfill_htlc(htlc_id, *payment_preimage) {
-                                                               Ok(update_fulfill_msg_option) => update_fulfill_htlcs.push(update_fulfill_msg_option.0.unwrap()),
+                                                               Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
+                                                                       update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+                                                                       if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
+                                                                               monitor_update.updates.append(&mut additional_monitor_update.updates);
+                                                                       }
+                                                               },
                                                                Err(e) => {
                                                                        if let ChannelError::Ignore(_) = e {}
                                                                        else {
@@ -1957,7 +2041,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                } else {
                                                        None
                                                };
-                                       let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
+
+                                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check()?;
+                                       // send_commitment_no_status_check and get_update_fulfill_htlc may each bump
+                                       // latest_monitor_update_id, but update_ids must increase strictly by one per
+                                       // update handed to the caller, so wind the counter back here.
+                                       self.latest_monitor_update_id = monitor_update.update_id;
+                                       monitor_update.updates.append(&mut additional_update.updates);
+
                                        Ok(Some((msgs::CommitmentUpdate {
                                                update_add_htlcs,
                                                update_fulfill_htlcs,
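
free_holding_cell_htlcs above reserves latest_monitor_update_id + 1 up front ("We don't increment this yet!"), folds the steps from each freed HTLC and from the final send_commitment_no_status_check into that one ChannelMonitorUpdate, and only then settles the counter. A simplified stand-in model of the accumulation (the final commitment step is omitted here for brevity):

struct Update { update_id: u64, updates: Vec<&'static str> }

struct HoldingCellChan { latest_monitor_update_id: u64, holding_cell: Vec<&'static str> }

impl HoldingCellChan {
    fn claim_htlc(&mut self, step: &'static str) -> Update {
        self.latest_monitor_update_id += 1;
        Update { update_id: self.latest_monitor_update_id, updates: vec![step] }
    }

    fn free_holding_cell(&mut self) -> Option<Update> {
        if self.holding_cell.is_empty() { return None; }
        let mut monitor_update = Update {
            update_id: self.latest_monitor_update_id + 1, // reserved, not incremented yet
            updates: Vec::new(),
        };
        let cell = std::mem::replace(&mut self.holding_cell, Vec::new());
        for step in cell {
            let mut additional = self.claim_htlc(step);
            monitor_update.updates.append(&mut additional.updates);
        }
        // Collapse the per-claim bumps back onto the single reserved id.
        self.latest_monitor_update_id = monitor_update.update_id;
        Some(monitor_update)
    }
}

fn main() {
    let mut chan = HoldingCellChan { latest_monitor_update_id: 4, holding_cell: vec!["claim_a", "claim_b"] };
    let update = chan.free_holding_cell().unwrap();
    assert_eq!(update.update_id, 5);
    assert_eq!(update.updates.len(), 2);
    assert_eq!(chan.latest_monitor_update_id, 5);
}
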
@@ -1979,7 +2069,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &FeeEstimator) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       pub fn revoke_and_ack<F: Deref>(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &F) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate), ChannelError>
+               where F::Target: FeeEstimator
+       {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state"));
                }
@@ -1995,8 +2087,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey"));
                        }
                }
-               self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
-                       .map_err(|e| ChannelError::Close(e.0))?;
 
                if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
                        // Our counterparty seems to have burned their coins to us (by revoking a state when we
@@ -2009,6 +2099,18 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        return Err(ChannelError::Close("Received an unexpected revoke_and_ack"));
                }
 
+               self.commitment_secrets.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
+                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one"))?;
+               self.latest_monitor_update_id += 1;
+               let mut monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+                               idx: self.cur_remote_commitment_transaction_number + 1,
+                               secret: msg.per_commitment_secret,
+                       }],
+               };
+               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
+
                // Update state now that we've passed all the can-fail calls...
                // (note that we may still fail to generate the new commitment_signed message, but that's
                // OK, we step the channel here and *then* if the new generation fails we can fail the
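
revoke_and_ack above now feeds the peer's revocation secret into self.commitment_secrets (a CounterpartyCommitmentSecrets) and mirrors it into a CommitmentSecret monitor update. The get_min_seen_secret() != (1 << 48) checks earlier in this diff rely on 1 << 48 being the "no secret seen yet" sentinel, since commitment numbers are 48-bit values counting down from INITIAL_COMMITMENT_NUMBER (2^48 - 1 in the crate). A toy model of that sentinel behaviour, not the real chan_utils implementation (which also verifies that older secrets are derivable from newer ones):

struct CommitmentSecretStore { min_seen_idx: u64 }

impl CommitmentSecretStore {
    fn new() -> Self { CommitmentSecretStore { min_seen_idx: 1 << 48 } }

    fn provide_secret(&mut self, idx: u64, _secret: [u8; 32]) -> Result<(), ()> {
        // Simplified: only require the commitment index to count down; the real store also
        // checks the secret against those previously provided.
        if idx >= self.min_seen_idx { return Err(()); }
        self.min_seen_idx = idx;
        Ok(())
    }

    fn get_min_seen_secret(&self) -> u64 { self.min_seen_idx }
}

fn main() {
    let mut secrets = CommitmentSecretStore::new();
    // Pre-funding sanity check: no secrets seen yet.
    assert_eq!(secrets.get_min_seen_secret(), 1 << 48);
    let initial_commitment_number = (1 << 48) - 1;
    secrets.provide_secret(initial_commitment_number, [0u8; 32]).unwrap();
    assert!(secrets.get_min_seen_secret() < 1 << 48);
}
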
@@ -2134,28 +2236,44 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                // When the monitor updating is restored we'll call get_last_commitment_update(),
                                // which does not update state, but we're definitely now awaiting a remote revoke
                                // before we can step forward any more, so set it here.
-                               self.send_commitment_no_status_check()?;
+                               let (_, mut additional_update) = self.send_commitment_no_status_check()?;
+                               // send_commitment_no_status_check may bump latest_monitor_update_id, but update_ids must
+                               // increase strictly by one per update handed to the caller, so wind the counter back here.
+                               self.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
                        }
                        self.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
-                       return Ok((None, Vec::new(), Vec::new(), None, self.channel_monitor.clone()));
+                       return Ok((None, Vec::new(), Vec::new(), None, monitor_update))
                }
 
                match self.free_holding_cell_htlcs()? {
-                       Some(mut commitment_update) => {
-                               commitment_update.0.update_fail_htlcs.reserve(update_fail_htlcs.len());
+                       Some((mut commitment_update, mut additional_update)) => {
+                               commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
                                for fail_msg in update_fail_htlcs.drain(..) {
-                                       commitment_update.0.update_fail_htlcs.push(fail_msg);
+                                       commitment_update.update_fail_htlcs.push(fail_msg);
                                }
-                               commitment_update.0.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
+                               commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
                                for fail_msg in update_fail_malformed_htlcs.drain(..) {
-                                       commitment_update.0.update_fail_malformed_htlcs.push(fail_msg);
+                                       commitment_update.update_fail_malformed_htlcs.push(fail_msg);
                                }
-                               Ok((Some(commitment_update.0), to_forward_infos, revoked_htlcs, None, commitment_update.1))
+
+                               // free_holding_cell_htlcs may bump latest_monitor_update_id several times, but update_ids
+                               // must increase strictly by one per update handed to the caller, so wind the counter back here.
+                               self.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
+
+                               Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, None, monitor_update))
                        },
                        None => {
                                if require_commitment {
-                                       let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
+                                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check()?;
+
+                                       // send_commitment_no_status_check may bump latest_monitor_update_id, but update_ids
+                                       // must increase strictly by one per update handed to the caller, so wind the counter
+                                       // back here.
+                                       self.latest_monitor_update_id = monitor_update.update_id;
+                                       monitor_update.updates.append(&mut additional_update.updates);
+
                                        Ok((Some(msgs::CommitmentUpdate {
                                                update_add_htlcs: Vec::new(),
                                                update_fulfill_htlcs: Vec::new(),
@@ -2165,7 +2283,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                commitment_signed
                                        }), to_forward_infos, revoked_htlcs, None, monitor_update))
                                } else {
-                                       Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), self.channel_monitor.clone()))
+                                       Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update))
                                }
                        }
                }
@@ -2200,7 +2318,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                })
        }
 
-       pub fn send_update_fee_and_commit(&mut self, feerate_per_kw: u64) -> Result<Option<(msgs::UpdateFee, msgs::CommitmentSigned, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+       pub fn send_update_fee_and_commit(&mut self, feerate_per_kw: u64) -> Result<Option<(msgs::UpdateFee, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> {
                match self.send_update_fee(feerate_per_kw) {
                        Some(update_fee) => {
                                let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
@@ -2285,7 +2403,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// which failed. The messages which were generated from that call which generated the
        /// monitor update failure must *not* have been sent to the remote end, and must instead
        /// have been dropped. They will be regenerated when monitor_updating_restored is called.
-       pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
+       pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                self.monitor_pending_revoke_and_ack = resend_raa;
                self.monitor_pending_commitment_signed = resend_commitment;
@@ -2299,7 +2417,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Indicates that the latest ChannelMonitor update has been committed by the client
        /// successfully and we should restore normal operation. Returns messages which should be sent
        /// to the remote side.
-       pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
+       pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
 
@@ -2351,7 +2469,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
        }
 
-       pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError<ChanSigner>> {
+       pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
+               where F::Target: FeeEstimator
+       {
                if self.channel_outbound {
                        return Err(ChannelError::Close("Non-funding remote tried to update channel fee"));
                }
@@ -2433,7 +2553,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        /// May panic if some calls other than message-handling calls (which will all Err immediately)
        /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
-       pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor<ChanSigner>>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError<ChanSigner>> {
+       pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitorUpdate>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError> {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
@@ -2453,8 +2573,18 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
                                        }
                                        if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
-                                               self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
-                                               return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone())});
+                                               self.latest_monitor_update_id += 1;
+                                               let monitor_update = ChannelMonitorUpdate {
+                                                       update_id: self.latest_monitor_update_id,
+                                                       updates: vec![ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo {
+                                                               their_current_per_commitment_point: data_loss.my_current_per_commitment_point
+                                                       }]
+                                               };
+                                               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
+                                               return Err(ChannelError::CloseDelayBroadcast {
+                                                       msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting",
+                                                       update: monitor_update
+                                               });
                                        }
                                },
                                OptionalField::Absent => {}
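
Rather than cloning the whole ChannelMonitor into the error as the removed lines did, the fallen-behind branch now describes the change as a ChannelMonitorUpdate: the channel bumps its latest_monitor_update_id, records the step, applies the update to its own copy via update_monitor_ooo, and returns the same update for the caller to persist. A rough sketch of that update-object idea, using stand-in types rather than rust-lightning's real ones:

    // Illustrative stand-ins, not rust-lightning's actual types.
    #[derive(Clone)]
    enum UpdateStep {
        RescueRemoteCommitmentTxInfo { their_current_per_commitment_point: [u8; 33] },
    }

    #[derive(Clone)]
    struct MonitorUpdate {
        update_id: u64,
        updates: Vec<UpdateStep>,
    }

    #[derive(Default)]
    struct Monitor {
        latest_update_id: u64,
        steps_applied: Vec<UpdateStep>,
    }

    impl Monitor {
        // Refuse updates that arrive out of order or with a gap in the id sequence.
        fn apply(&mut self, update: MonitorUpdate) -> Result<(), &'static str> {
            if update.update_id != self.latest_update_id + 1 {
                return Err("out-of-order or missing monitor update");
            }
            self.latest_update_id = update.update_id;
            self.steps_applied.extend(update.updates);
            Ok(())
        }
    }

    fn main() {
        let mut channel_latest_update_id = 0u64;
        let mut local_monitor = Monitor::default();

        // The channel bumps its counter and describes the change as plain data...
        channel_latest_update_id += 1;
        let update = MonitorUpdate {
            update_id: channel_latest_update_id,
            updates: vec![UpdateStep::RescueRemoteCommitmentTxInfo {
                their_current_per_commitment_point: [2u8; 33],
            }],
        };

        // ...applies it to its own in-memory copy, and hands the same update to the
        // caller so any persisted copies can be brought up to date as well.
        local_monitor.apply(update.clone()).expect("first update applies cleanly");
        assert_eq!(local_monitor.latest_update_id, 1);
        match &local_monitor.steps_applied[0] {
            UpdateStep::RescueRemoteCommitmentTxInfo { their_current_per_commitment_point } => {
                assert_eq!(their_current_per_commitment_point[0], 2);
            }
        }
    }

The strictly increasing update_id is what later lets channel_monitor_updated (added in the channelmanager.rs portion of this diff) tell whether every persisted copy of a monitor has caught up.
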
@@ -2538,7 +2668,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                match self.free_holding_cell_htlcs() {
                                        Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
                                        Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-                                       Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
+                                       Ok(Some((commitment_update, monitor_update))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), shutdown_msg)),
                                        Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
                                }
                        } else {
@@ -2562,7 +2692,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                }
        }
 
-       fn maybe_propose_first_closing_signed(&mut self, fee_estimator: &FeeEstimator) -> Option<msgs::ClosingSigned> {
+       fn maybe_propose_first_closing_signed<F: Deref>(&mut self, fee_estimator: &F) -> Option<msgs::ClosingSigned>
+               where F::Target: FeeEstimator
+       {
                if !self.channel_outbound || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() ||
                                self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK ||
                                self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() {
@@ -2590,7 +2722,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                })
        }
 
-       pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError<ChanSigner>> {
+       pub fn shutdown<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+               where F::Target: FeeEstimator
+       {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish"));
                }
@@ -2686,7 +2820,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                tx.input[0].witness.push(self.get_funding_redeemscript().into_bytes());
        }
 
-       pub fn closing_signed(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError<ChanSigner>> {
+       pub fn closing_signed<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+               where F::Target: FeeEstimator
+       {
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
                        return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown"));
                }
@@ -2800,7 +2936,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                if self.channel_state < ChannelState::FundingCreated as u32 {
                        panic!("Can't get a channel monitor until funding has been created");
                }
-               &mut self.channel_monitor
+               self.channel_monitor.as_mut().unwrap()
        }
 
        /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
@@ -2813,7 +2949,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Returns the funding_txo we either got from our peer, or were given by
        /// get_outbound_funding_created.
        pub fn get_funding_txo(&self) -> Option<OutPoint> {
-               self.channel_monitor.get_funding_txo()
+               self.funding_txo
        }
 
        /// Allowed in any state (including after shutdown)
@@ -2890,6 +3026,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                self.channel_update_count
        }
 
+       pub fn get_latest_monitor_update_id(&self) -> u64 {
+               self.latest_monitor_update_id
+       }
+
        pub fn should_announce(&self) -> bool {
                self.config.announced_channel
        }
@@ -2900,7 +3040,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
        /// Allowed in any state (including after shutdown)
-       pub fn get_our_fee_base_msat(&self, fee_estimator: &FeeEstimator) -> u32 {
+       pub fn get_our_fee_base_msat<F: Deref>(&self, fee_estimator: &F) -> u32
+               where F::Target: FeeEstimator
+       {
                // For lack of a better metric, we calculate what it would cost to consolidate the new HTLC
                // output value back into a transaction with the regular channel output:
 
@@ -2993,8 +3135,8 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                }
                if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
                        for (ref tx, index_in_block) in txn_matched.iter().zip(indexes_of_txn_matched) {
-                               if tx.txid() == self.channel_monitor.get_funding_txo().unwrap().txid {
-                                       let txo_idx = self.channel_monitor.get_funding_txo().unwrap().index as usize;
+                               if tx.txid() == self.funding_txo.unwrap().txid {
+                                       let txo_idx = self.funding_txo.unwrap().index as usize;
                                        if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
                                                        tx.output[txo_idx].value != self.channel_value_satoshis {
                                                if self.channel_outbound {
@@ -3033,7 +3175,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                }
                if header.bitcoin_hash() != self.last_block_connected {
                        self.last_block_connected = header.bitcoin_hash();
-                       self.channel_monitor.last_block_hash = self.last_block_connected;
+                       if let Some(channel_monitor) = self.channel_monitor.as_mut() {
+                               channel_monitor.last_block_hash = self.last_block_connected;
+                       }
                        if self.funding_tx_confirmations > 0 {
                                if self.funding_tx_confirmations == self.minimum_depth as u64 {
                                        let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
@@ -3093,14 +3237,18 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
                }
                self.last_block_connected = header.bitcoin_hash();
-               self.channel_monitor.last_block_hash = self.last_block_connected;
+               if let Some(channel_monitor) = self.channel_monitor.as_mut() {
+                       channel_monitor.last_block_hash = self.last_block_connected;
+               }
                false
        }
 
        // Methods to get unprompted messages to send to the remote end (or where we already returned
        // something in the handler for the message that prompted this message):
 
-       pub fn get_open_channel(&self, chain_hash: Sha256dHash, fee_estimator: &FeeEstimator) -> msgs::OpenChannel {
+       pub fn get_open_channel<F: Deref>(&self, chain_hash: Sha256dHash, fee_estimator: &F) -> msgs::OpenChannel
+               where F::Target: FeeEstimator
+       {
                if !self.channel_outbound {
                        panic!("Tried to open a channel for an inbound channel?");
                }
@@ -3170,7 +3318,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        }
 
        /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
-       fn get_outbound_funding_created_signature(&mut self) -> Result<(Signature, Transaction), ChannelError<ChanSigner>> {
+       fn get_outbound_funding_created_signature(&mut self) -> Result<(Signature, Transaction), ChannelError> {
                let remote_keys = self.build_remote_transaction_keys()?;
                let remote_initial_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, false, self.feerate_per_kw).0;
                Ok((self.local_keys.sign_remote_commitment(self.feerate_per_kw, &remote_initial_commitment_tx, &remote_keys, &Vec::new(), self.our_to_self_delay, &self.secp_ctx)
@@ -3184,27 +3332,25 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_outbound_funding_created(&mut self, funding_txo: OutPoint) -> Result<(msgs::FundingCreated, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       pub fn get_outbound_funding_created(&mut self, funding_txo: OutPoint) -> Result<(msgs::FundingCreated, ChannelMonitor<ChanSigner>), ChannelError> {
                if !self.channel_outbound {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
                if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
                        panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
                }
-               if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+               if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
                                self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
-               let funding_txo_script = self.get_funding_redeemscript().to_v0_p2wsh();
-               self.channel_monitor.set_funding_info((funding_txo, funding_txo_script));
-
+               self.funding_txo = Some(funding_txo.clone());
                let (our_signature, commitment_tx) = match self.get_outbound_funding_created_signature() {
                        Ok(res) => res,
                        Err(e) => {
                                log_error!(self, "Got bad signatures: {:?}!", e);
-                               self.channel_monitor.unset_funding_info();
+                               self.funding_txo = None;
                                return Err(e);
                        }
                };
@@ -3212,7 +3358,28 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let temporary_channel_id = self.channel_id;
 
                // Now that we're past error-generating stuff, update our local state:
-               self.channel_monitor.provide_latest_remote_commitment_tx_info(&commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+
+               let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
+               let funding_redeemscript = self.get_funding_redeemscript();
+               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               macro_rules! create_monitor {
+                       () => { {
+                               let mut channel_monitor = ChannelMonitor::new(self.local_keys.clone(),
+                                                                             &self.shutdown_pubkey, self.our_to_self_delay,
+                                                                             &self.destination_script, (funding_txo, funding_txo_script.clone()),
+                                                                             &their_pubkeys.htlc_basepoint, &their_pubkeys.delayed_payment_basepoint,
+                                                                             self.their_to_self_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
+                                                                             self.get_commitment_transaction_number_obscure_factor(),
+                                                                             self.logger.clone());
+
+                               channel_monitor.provide_latest_remote_commitment_tx_info(&commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+                               channel_monitor
+                       } }
+               }
+
+               self.channel_monitor = Some(create_monitor!());
+               let channel_monitor = create_monitor!();
+
                self.channel_state = ChannelState::FundingCreated as u32;
                self.channel_id = funding_txo.to_channel_id();
                self.cur_remote_commitment_transaction_number -= 1;
@@ -3222,7 +3389,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        funding_txid: funding_txo.txid,
                        funding_output_index: funding_txo.index,
                        signature: our_signature
-               }, self.channel_monitor.clone()))
+               }, channel_monitor))
        }
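
get_outbound_funding_created now builds the monitor twice through the local create_monitor! macro: one instance is stored on the Channel, the other is handed back to the caller, replacing the self.channel_monitor.clone() the old code relied on. A tiny sketch of the construct-twice-via-macro trick, with an illustrative stand-in type that we pretend should not be Cloned:

    // Stand-in for a type we would rather not (or cannot) Clone.
    struct Monitor {
        funding_script: Vec<u8>,
        value_sats: u64,
    }

    struct Channel {
        monitor: Option<Monitor>,
    }

    impl Channel {
        fn create_monitor_pair(&mut self, funding_script: Vec<u8>, value_sats: u64) -> Monitor {
            // The macro body captures the surrounding locals, so both expansions build
            // identical but independent instances without requiring Monitor: Clone.
            macro_rules! create_monitor {
                () => {
                    Monitor { funding_script: funding_script.clone(), value_sats }
                }
            }
            self.monitor = Some(create_monitor!());
            create_monitor!()
        }
    }

    fn main() {
        let mut chan = Channel { monitor: None };
        let returned = chan.create_monitor_pair(vec![0x51], 100_000);
        let stored = chan.monitor.as_ref().unwrap();
        assert_eq!(stored.value_sats, returned.value_sats);
        assert_eq!(stored.funding_script, returned.funding_script);
    }
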
 
        /// Gets an UnsignedChannelAnnouncement, as well as a signature covering it using our
@@ -3233,7 +3400,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// closing).
        /// Note that the "channel must be funded" requirement is stricter than BOLT 7 requires - see
        /// https://github.com/lightningnetwork/lightning-rfc/issues/468
-       pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError<ChanSigner>> {
+       pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError> {
                if !self.config.announced_channel {
                        return Err(ChannelError::Ignore("Channel is not available for public announcements"));
                }
@@ -3270,7 +3437,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
                assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
                let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
-                       let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
+                       let remote_last_secret = self.commitment_secrets.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
                        log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
                        OptionalField::Present(DataLossProtect {
                                your_last_per_commitment_secret: remote_last_secret,
@@ -3317,7 +3484,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
        /// You MUST call send_commitment prior to any other calls on this Channel
        /// If an Err is returned, it's a ChannelError::Ignore!
-       pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError<ChanSigner>> {
+       pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
                        return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down"));
                }
@@ -3394,7 +3561,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// Always returns a ChannelError::Close if an immediately-preceding (read: the
        /// last call to this Channel) send_htlc returned Ok(Some(_)) and there is an Err.
        /// May panic if called except immediately after a successful, Ok(Some(_))-returning send_htlc.
-       pub fn send_commitment(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       pub fn send_commitment(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        panic!("Cannot create commitment tx until channel is fully established");
                }
@@ -3426,7 +3593,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                self.send_commitment_no_status_check()
        }
        /// Only fails in case of bad keys
-       fn send_commitment_no_status_check(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+       fn send_commitment_no_status_check(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> {
                // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
                // fail to generate this, we still are at least at a position where upgrading their status
                // is acceptable.
@@ -3450,20 +3617,31 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() {
                        Ok((res, (remote_commitment_tx, mut htlcs))) => {
                                // Update state now that we've passed all the can-fail calls...
-                               let htlcs_no_ref = htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+                               let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+                                       htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
                                (res, remote_commitment_tx, htlcs_no_ref)
                        },
                        Err(e) => return Err(e),
                };
 
-               self.channel_monitor.provide_latest_remote_commitment_tx_info(&remote_commitment_tx, htlcs, self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+               self.latest_monitor_update_id += 1;
+               let monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo {
+                               unsigned_commitment_tx: remote_commitment_tx.clone(),
+                               htlc_outputs: htlcs.clone(),
+                               commitment_number: self.cur_remote_commitment_transaction_number,
+                               their_revocation_point: self.their_cur_commitment_point.unwrap()
+                       }]
+               };
+               self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
                self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
-               Ok((res, self.channel_monitor.clone()))
+               Ok((res, monitor_update))
        }
 
        /// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
        /// when we shouldn't change HTLC/channel state.
-       fn send_commitment_no_state_update(&self) -> Result<(msgs::CommitmentSigned, (Transaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError<ChanSigner>> {
+       fn send_commitment_no_state_update(&self) -> Result<(msgs::CommitmentSigned, (Transaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> {
                let mut feerate_per_kw = self.feerate_per_kw;
                if let Some(feerate) = self.pending_update_fee {
                        if self.channel_outbound {
@@ -3511,7 +3689,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// to send to the remote peer in one go.
        /// Shorthand for calling send_htlc() followed by send_commitment(), see docs on those for
        /// more info.
-       pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+       pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> {
                match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet)? {
                        Some(update_add_htlc) => {
                                let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
@@ -3600,7 +3778,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.channel_update_count += 1;
-               (self.channel_monitor.get_latest_local_commitment_txn(), dropped_outbound_htlcs)
+               if self.channel_monitor.is_some() {
+                       (self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
+               } else {
+                       // We don't even have a signed funding transaction yet, so we can't broadcast anything
+                       (Vec::new(), dropped_outbound_htlcs)
+               }
        }
 }
 
@@ -3655,8 +3838,11 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
                self.channel_outbound.write(writer)?;
                self.channel_value_satoshis.write(writer)?;
 
+               self.latest_monitor_update_id.write(writer)?;
+
                self.local_keys.write(writer)?;
                self.shutdown_pubkey.write(writer)?;
+               self.destination_script.write(writer)?;
 
                self.cur_local_commitment_transaction_number.write(writer)?;
                self.cur_remote_commitment_transaction_number.write(writer)?;
@@ -3803,6 +3989,7 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
                        None => 0u8.write(writer)?,
                }
 
+               write_option!(self.funding_txo);
                write_option!(self.funding_tx_confirmed_in);
                write_option!(self.short_channel_id);
 
@@ -3828,7 +4015,9 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
 
                write_option!(self.their_shutdown_scriptpubkey);
 
-               self.channel_monitor.write_for_disk(writer)?;
+               self.commitment_secrets.write(writer)?;
+
+               self.channel_monitor.as_ref().unwrap().write_for_disk(writer)?;
                Ok(())
        }
 }
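
This Writeable hunk and the matching ReadableArgs hunks below add the new fields (latest_monitor_update_id, destination_script, funding_txo, commitment_secrets) at the same positions on both the write and the read side, since the serialization format is defined purely by field order. A minimal sketch of that write-order-equals-read-order discipline, using plain std::io types instead of the crate's ser traits (the struct and fields are illustrative only):

    use std::io::{self, Cursor, Read, Write};

    // Illustrative stand-in for a couple of the newly added channel fields.
    struct ChannelBits {
        latest_monitor_update_id: u64,
        funding_txo: Option<[u8; 32]>, // txid only, for brevity
    }

    impl ChannelBits {
        fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
            // The field order here *is* the on-disk format; read() must mirror it exactly.
            w.write_all(&self.latest_monitor_update_id.to_be_bytes())?;
            match self.funding_txo {
                Some(txid) => { w.write_all(&[1])?; w.write_all(&txid)?; },
                None => w.write_all(&[0])?,
            }
            Ok(())
        }

        fn read<R: Read>(r: &mut R) -> io::Result<Self> {
            let mut id = [0u8; 8];
            r.read_exact(&mut id)?;
            let mut flag = [0u8; 1];
            r.read_exact(&mut flag)?;
            let funding_txo = if flag[0] == 1 {
                let mut txid = [0u8; 32];
                r.read_exact(&mut txid)?;
                Some(txid)
            } else {
                None
            };
            Ok(ChannelBits { latest_monitor_update_id: u64::from_be_bytes(id), funding_txo })
        }
    }

    fn main() -> io::Result<()> {
        let chan = ChannelBits { latest_monitor_update_id: 7, funding_txo: Some([0xab; 32]) };
        let mut buf = Vec::new();
        chan.write(&mut buf)?;
        let round_trip = ChannelBits::read(&mut Cursor::new(buf))?;
        assert_eq!(round_trip.latest_monitor_update_id, 7);
        Ok(())
    }

If one side gained a field without the other, every byte after the mismatch would be misinterpreted, which is why both impls are changed in lockstep here.
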
@@ -3849,8 +4038,11 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                let channel_outbound = Readable::read(reader)?;
                let channel_value_satoshis = Readable::read(reader)?;
 
+               let latest_monitor_update_id = Readable::read(reader)?;
+
                let local_keys = Readable::read(reader)?;
                let shutdown_pubkey = Readable::read(reader)?;
+               let destination_script = Readable::read(reader)?;
 
                let cur_local_commitment_transaction_number = Readable::read(reader)?;
                let cur_remote_commitment_transaction_number = Readable::read(reader)?;
@@ -3953,6 +4145,7 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        _ => return Err(DecodeError::InvalidValue),
                };
 
+               let funding_txo = Readable::read(reader)?;
                let funding_tx_confirmed_in = Readable::read(reader)?;
                let short_channel_id = Readable::read(reader)?;
 
@@ -3977,6 +4170,8 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                let their_node_id = Readable::read(reader)?;
 
                let their_shutdown_scriptpubkey = Readable::read(reader)?;
+               let commitment_secrets = Readable::read(reader)?;
+
                let (monitor_last_block, channel_monitor) = ReadableArgs::read(reader, logger.clone())?;
                // We drop the ChannelMonitor's last block connected hash cause we don't actually bother
                // doing full block connection operations on the internal ChannelMonitor copies
@@ -3994,8 +4189,11 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        secp_ctx: Secp256k1::new(),
                        channel_value_satoshis,
 
+                       latest_monitor_update_id,
+
                        local_keys,
                        shutdown_pubkey,
+                       destination_script,
 
                        cur_local_commitment_transaction_number,
                        cur_remote_commitment_transaction_number,
@@ -4027,6 +4225,7 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
 
                        last_sent_closing_fee,
 
+                       funding_txo,
                        funding_tx_confirmed_in,
                        short_channel_id,
                        last_block_connected,
@@ -4051,7 +4250,8 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
 
                        their_shutdown_scriptpubkey,
 
-                       channel_monitor,
+                       channel_monitor: Some(channel_monitor),
+                       commitment_secrets,
 
                        network_sync: UpdateStatus::Fresh,
 
@@ -4156,17 +4356,17 @@ mod tests {
 
                assert_eq!(PublicKey::from_secret_key(&secp_ctx, chan_keys.funding_key()).serialize()[..],
                                hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
-               let keys_provider: Arc<KeysInterface<ChanKeySigner = InMemoryChannelKeys>> = Arc::new(Keys { chan_keys });
+               let keys_provider = Keys { chan_keys };
 
                let their_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let mut config = UserConfig::default();
                config.channel_options.announced_channel = false;
-               let mut chan = Channel::<InMemoryChannelKeys>::new_outbound(&feeest, &keys_provider, their_node_id, 10000000, 100000, 42, Arc::clone(&logger), &config).unwrap(); // Nothing uses their network key in this test
+               let mut chan = Channel::<InMemoryChannelKeys>::new_outbound(&&feeest, &&keys_provider, their_node_id, 10000000, 100000, 42, Arc::clone(&logger), &config).unwrap(); // Nothing uses their network key in this test
                chan.their_to_self_delay = 144;
                chan.our_dust_limit_satoshis = 546;
 
                let funding_info = OutPoint::new(Sha256dHash::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), 0);
-               chan.channel_monitor.set_funding_info((funding_info, Script::new()));
+               chan.funding_txo = Some(funding_info);
 
                let their_pubkeys = ChannelPublicKeys {
                        funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
index 015894e594dc1246efecef452d312171eed099ba..c515ceab9a8b5c174942e83e85ac3adb5089d219 100644 (file)
@@ -34,7 +34,7 @@ use ln::features::InitFeatures;
 use ln::msgs;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
-use chain::keysinterface::{ChannelKeys, KeysInterface, InMemoryChannelKeys};
+use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
 use util::config::UserConfig;
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, Writeable, Writer};
@@ -57,15 +57,19 @@ use std::ops::Deref;
 // forward the HTLC with information it will give back to us when it does so, or if it should Fail
 // the HTLC with the relevant message for the Channel to handle giving to the remote peer.
 //
-// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
-// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
-// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
-// the HTLC backwards along the relevant path).
+// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
+// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
+// with it to track where it came from (in case of onwards-forward error), waiting a random delay
+// before we forward it.
+//
+// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
+// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
+// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
 // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
 // our payment, which we can use to decode errors or inform the user that the payment was sent.
-/// Stores the info we will need to send when we want to forward an HTLC onwards
+
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) struct PendingForwardHTLCInfo {
+pub(super) struct PendingHTLCInfo {
        onion_packet: Option<msgs::OnionPacket>,
        incoming_shared_secret: [u8; 32],
        payment_hash: PaymentHash,
@@ -83,10 +87,22 @@ pub(super) enum HTLCFailureMsg {
 /// Stores whether we can't forward an HTLC or relevant forwarding info
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub(super) enum PendingHTLCStatus {
-       Forward(PendingForwardHTLCInfo),
+       Forward(PendingHTLCInfo),
        Fail(HTLCFailureMsg),
 }
 
+pub(super) enum HTLCForwardInfo {
+       AddHTLC {
+               prev_short_channel_id: u64,
+               prev_htlc_id: u64,
+               forward_info: PendingHTLCInfo,
+       },
+       FailHTLC {
+               htlc_id: u64,
+               err_packet: msgs::OnionErrorPacket,
+       },
+}
+
 /// Tracks the inbound corresponding to an outbound HTLC
 #[derive(Clone, PartialEq)]
 pub(super) struct HTLCPreviousHopData {
@@ -194,7 +210,7 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn from_chan_no_close<ChanSigner: ChannelKeys>(err: ChannelError<ChanSigner>, channel_id: [u8; 32]) -> Self {
+       fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
                Self {
                        err: match err {
                                ChannelError::Ignore(msg) => LightningError {
@@ -231,18 +247,6 @@ impl MsgHandleErrInternal {
 /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
 
-pub(super) enum HTLCForwardInfo {
-       AddHTLC {
-               prev_short_channel_id: u64,
-               prev_htlc_id: u64,
-               forward_info: PendingForwardHTLCInfo,
-       },
-       FailHTLC {
-               htlc_id: u64,
-               err_packet: msgs::OnionErrorPacket,
-       },
-}
-
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
 /// be sent in the order they appear in the return value, however sometimes the order needs to be
 /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
@@ -262,7 +266,7 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
        /// short channel id -> forward infos. Key of 0 means payments received
        /// Note that while this is held in the same mutex as the channels themselves, no consistency
        /// guarantees are made about the existence of a channel with the short id here, nor the short
-       /// ids in the PendingForwardHTLCInfo!
+       /// ids in the PendingHTLCInfo!
        pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
        /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
        /// can be failed/claimed by the user
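
As the comment above describes, forward_htlcs maps an outgoing short channel id to the HTLCs queued for it, with key 0 reserved for payments that terminate at this node, and no consistency is guaranteed between these keys and the live channel maps. A small stand-in sketch of that keying convention (types are illustrative, not the crate's):

    use std::collections::HashMap;

    // Illustrative stand-in for HTLCForwardInfo.
    enum Forward {
        Add { prev_short_channel_id: u64, amt_msat: u64 },
    }

    fn main() {
        // Key = outgoing short channel id; key 0 = the payment is for us.
        let mut forward_htlcs: HashMap<u64, Vec<Forward>> = HashMap::new();

        // One HTLC queued to be forwarded out over scid 42, one that we will claim ourselves.
        forward_htlcs.entry(42).or_insert_with(Vec::new)
            .push(Forward::Add { prev_short_channel_id: 7, amt_msat: 10_000 });
        forward_htlcs.entry(0).or_insert_with(Vec::new)
            .push(Forward::Add { prev_short_channel_id: 7, amt_msat: 5_000 });

        for (scid, pending) in &forward_htlcs {
            for fwd in pending {
                let Forward::Add { prev_short_channel_id, amt_msat } = fwd;
                if *scid == 0 {
                    println!("{} msat received for us (inbound scid {})", amt_msat, prev_short_channel_id);
                } else {
                    println!("{} msat to forward over scid {} (inbound scid {})", amt_msat, scid, prev_short_channel_id);
                }
            }
        }
    }
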
@@ -288,16 +292,20 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
 /// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
-/// issues such as overly long function definitions.
-pub type SimpleArcChannelManager<M> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>>>;
+/// issues such as overly long function definitions. Note that the ChannelManager can take any
+/// type that implements KeysInterface for its keys manager, but this type alias chooses the
+/// concrete type of the KeysManager.
+pub type SimpleArcChannelManager<M, T, F> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>>>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
 /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
 /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
 /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
-/// helps with issues such as long function definitions.
-pub type SimpleRefChannelManager<'a, M> = ChannelManager<InMemoryChannelKeys, &'a M>;
+/// helps with issues such as long function definitions. Note that the ChannelManager can take any
+/// type that implements KeysInterface for its keys manager, but this type alias chooses the
+/// concrete type of the KeysManager.
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, M, T, F> = ChannelManager<InMemoryChannelKeys, &'a M, &'b T, &'c KeysManager, &'d F>;
 
 /// Manager which keeps track of a number of channels and sends messages to the appropriate
 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
@@ -312,7 +320,7 @@ pub type SimpleRefChannelManager<'a, M> = ChannelManager<InMemoryChannelKeys, &'
 ///
 /// Note that you can be a bit lazier about writing out ChannelManager than you can be with
 /// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
+/// returning from ManyChannelMonitor::add_monitor/update_monitor; with ChannelManagers, writing updates
 /// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
 /// the serialization process). If the deserialized version is out-of-date compared to the
 /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
@@ -335,12 +343,17 @@ pub type SimpleRefChannelManager<'a, M> = ChannelManager<InMemoryChannelKeys, &'
 /// essentially you should default to using a SimpleRefChannelManager, and use a
 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
 /// you're using lightning-net-tokio.
-pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref> where M::Target: ManyChannelMonitor<ChanSigner> {
+pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        default_configuration: UserConfig,
        genesis_hash: Sha256dHash,
-       fee_estimator: Arc<FeeEstimator>,
+       fee_estimator: F,
        monitor: M,
-       tx_broadcaster: Arc<BroadcasterInterface>,
+       tx_broadcaster: T,
 
        #[cfg(test)]
        pub(super) latest_block_height: AtomicUsize,
@@ -369,7 +382,7 @@ pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref> where M::Target: Ma
        /// Taken first everywhere where we are making changes before any other locks.
        total_consistency_lock: RwLock<()>,
 
-       keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>,
+       keys_manager: K,
 
        logger: Arc<Logger>,
 }
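
ChannelManager is now generic over Deref wrappers for its monitor, broadcaster, keys manager and fee estimator (M, T, K, F), and the SimpleArcChannelManager / SimpleRefChannelManager aliases earlier in this hunk simply pin those parameters to Arc<_> or to plain references. A toy sketch of the same parameterization, with dummy traits standing in for the real interfaces:

    use std::ops::Deref;
    use std::sync::Arc;

    // Dummy stand-ins for the real interfaces (illustrative only).
    trait Broadcaster { fn broadcast(&self, tx_hex: &str); }
    trait FeeEstimator { fn feerate(&self) -> u64; }

    struct Manager<T: Deref, F: Deref>
        where T::Target: Broadcaster,
              F::Target: FeeEstimator,
    {
        tx_broadcaster: T,
        fee_estimator: F,
    }

    impl<T: Deref, F: Deref> Manager<T, F>
        where T::Target: Broadcaster,
              F::Target: FeeEstimator,
    {
        fn send_dummy(&self) {
            if self.fee_estimator.feerate() > 0 {
                self.tx_broadcaster.broadcast("deadbeef");
            }
        }
    }

    // Arc-based alias for 'static uses (eg spawning onto an async runtime)...
    type ArcManager<T, F> = Manager<Arc<T>, Arc<F>>;
    // ...and a reference-based alias when everything outlives the manager anyway.
    type RefManager<'a, 'b, T, F> = Manager<&'a T, &'b F>;

    struct Noop;
    impl Broadcaster for Noop { fn broadcast(&self, _tx_hex: &str) {} }
    struct Flat(u64);
    impl FeeEstimator for Flat { fn feerate(&self) -> u64 { self.0 } }

    fn main() {
        let arc_mgr: ArcManager<Noop, Flat> =
            Manager { tx_broadcaster: Arc::new(Noop), fee_estimator: Arc::new(Flat(253)) };
        arc_mgr.send_dummy();

        let (b, f) = (Noop, Flat(1000));
        let ref_mgr: RefManager<'_, '_, Noop, Flat> =
            Manager { tx_broadcaster: &b, fee_estimator: &f };
        ref_mgr.send_dummy();
    }

The Arc-based alias gives the 'static types needed when the manager is moved into spawned tasks (as with lightning-net-tokio), while the reference-based alias avoids the extra atomic reference counting when everything already outlives the manager.
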
@@ -477,7 +490,7 @@ macro_rules! break_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(ChannelError::Ignore(msg)) => {
-                               break Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone()))
+                               break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
                        },
                        Err(ChannelError::Close(msg)) => {
                                log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
@@ -497,7 +510,7 @@ macro_rules! try_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(ChannelError::Ignore(msg)) => {
-                               return Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone()))
+                               return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
                        },
                        Err(ChannelError::Close(msg)) => {
                                log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
@@ -513,16 +526,14 @@ macro_rules! try_chan_entry {
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        $channel_state.short_to_id.remove(&short_id);
                                }
-                               if let Some(update) = update {
-                                       if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update.clone()) {
-                                               match e {
-                                                       // Upstream channel is dead, but we want at least to fail backward HTLCs to save
-                                                       // downstream channels. In case of PermanentFailure, we are not going to be able
-                                                       // to claim back to_remote output on remote commitment transaction. Doesn't
-                                                       // make a difference here, we are concern about HTLCs circuit, not onchain funds.
-                                                       ChannelMonitorUpdateErr::PermanentFailure => {},
-                                                       ChannelMonitorUpdateErr::TemporaryFailure => {},
-                                               }
+                               if let Err(e) = $self.monitor.update_monitor(chan.get_funding_txo().unwrap(), update) {
+                                       match e {
+                                               // Upstream channel is dead, but we want at least to fail backward HTLCs to save
+                                               // downstream channels. In case of PermanentFailure, we are not going to be able
+                                               // to claim back to_remote output on remote commitment transaction. Doesn't
+                                               // make a difference here, we are concerned about the HTLC circuit, not onchain funds.
+                                               ChannelMonitorUpdateErr::PermanentFailure => {},
+                                               ChannelMonitorUpdateErr::TemporaryFailure => {},
                                        }
                                }
                                let mut shutdown_res = chan.force_shutdown();
@@ -571,7 +582,7 @@ macro_rules! handle_monitor_err {
                                                        } else if $resend_commitment { "commitment" }
                                                        else if $resend_raa { "RAA" }
                                                        else { "nothing" },
-                                               (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+                                               (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
                                                (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
                                if !$resend_commitment {
                                        debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
@@ -580,7 +591,7 @@ macro_rules! handle_monitor_err {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
                                $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-                               Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
+                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
                        },
                }
        }
@@ -607,7 +618,12 @@ macro_rules! maybe_break_monitor_err {
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        /// Constructs a new ChannelManager to hold several channels and route between them.
        ///
        /// This is the main "logic hub" for all channel-related actions, and implements
@@ -626,13 +642,13 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
        /// the ChannelManager as a listener to the BlockNotifier and call the BlockNotifier's
        /// `block_(dis)connected` methods, which will notify all registered listeners in one
        /// go.
-       pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: M, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M>, secp256k1::Error> {
+       pub fn new(network: Network, fee_est: F, monitor: M, tx_broadcaster: T, logger: Arc<Logger>, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Result<ChannelManager<ChanSigner, M, T, K, F>, secp256k1::Error> {
                let secp_ctx = Secp256k1::new();
 
                let res = ChannelManager {
                        default_configuration: config.clone(),
                        genesis_hash: genesis_block(network).header.bitcoin_hash(),
-                       fee_estimator: feeest.clone(),
+                       fee_estimator: fee_est,
                        monitor,
                        tx_broadcaster,
 
@@ -679,8 +695,8 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
                }
 
-               let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
-               let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
+               let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
+               let res = channel.get_open_channel(self.genesis_hash.clone(), &self.fee_estimator);
 
                let _ = self.total_consistency_lock.read().unwrap();
                let mut channel_state = self.channel_state.lock().unwrap();
@@ -701,7 +717,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                Ok(())
        }
 
-       fn list_channels_with_filter<F: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: F) -> Vec<ChannelDetails> {
+       fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
                let mut res = Vec::new();
                {
                        let channel_state = self.channel_state.lock().unwrap();
@@ -963,7 +979,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                // instead we stay symmetric with the forwarding case, only responding (after a
                                // delay) once they've sent us a commitment_signed!
 
-                               PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+                               PendingHTLCStatus::Forward(PendingHTLCInfo {
                                        onion_packet: None,
                                        payment_hash: msg.payment_hash.clone(),
                                        short_channel_id: 0,
@@ -1015,7 +1031,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        },
                                };
 
-                               PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+                               PendingHTLCStatus::Forward(PendingHTLCInfo {
                                        onion_packet: Some(outgoing_packet),
                                        payment_hash: msg.payment_hash.clone(),
                                        short_channel_id: short_channel_id,
@@ -1026,7 +1042,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        };
 
                channel_state = Some(self.channel_state.lock().unwrap());
-               if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+               if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
                                let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
                                let forwarding_id = match id_option {
@@ -1049,7 +1065,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
                                                break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
                                        }
-                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+                                       let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&self.fee_estimator) as u64) });
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
                                                break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
                                        }
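
The check above computes the forwarding fee with checked arithmetic, fee = fee_base_msat + amt_to_forward * fee_proportional_millionths / 1_000_000, and fails the HTLC if the inbound amount_msat does not cover amt_to_forward plus that fee. A worked example (the helper name is ours, not the crate's):

    // Forwarding fee as computed in the hunk above, with checked arithmetic so an
    // overflow rejects the HTLC instead of silently wrapping.
    fn required_fee_msat(amt_to_forward_msat: u64, fee_base_msat: u32, fee_proportional_millionths: u32) -> Option<u64> {
        amt_to_forward_msat
            .checked_mul(fee_proportional_millionths as u64)
            .and_then(|prop_fee| (prop_fee / 1_000_000).checked_add(fee_base_msat as u64))
    }

    fn main() {
        // Forwarding 100_000 msat over a channel charging 1_000 msat base + 100 ppm:
        let fee = required_fee_msat(100_000, 1_000, 100).unwrap();
        assert_eq!(fee, 1_010);
        // The inbound HTLC's amount_msat must cover amt_to_forward plus the fee.
        let amount_msat = 101_010u64;
        assert!(amount_msat >= fee && amount_msat - fee >= 100_000);
    }
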
@@ -1105,7 +1121,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
                        cltv_expiry_delta: CLTV_EXPIRY_DELTA,
                        htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
-                       fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
+                       fee_base_msat: chan.get_our_fee_base_msat(&self.fee_estimator),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
                };
@@ -1193,8 +1209,8 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                first_hop_htlc_msat: htlc_msat,
                                        }, onion_packet), channel_state, chan)
                                } {
-                                       Some((update_add, commitment_signed, chan_monitor)) => {
-                                               if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                       Some((update_add, commitment_signed, monitor_update)) => {
+                                               if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
                                                        // Note that MonitorUpdateFailed here indicates (per function docs)
                                                        // that we will resend the commitment update once we unfree monitor
@@ -1259,8 +1275,8 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
-               // lock before add_update_monitor
-               if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+               // lock before add_monitor
+               if let Err(e) = self.monitor.add_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                        match e {
                                ChannelMonitorUpdateErr::PermanentFailure => {
                                        {
@@ -1428,7 +1444,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                }
 
                                                if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-                                                       let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+                                                       let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment() {
                                                                Ok(res) => res,
                                                                Err(e) => {
                                                                        // We surely failed send_commitment due to bad keys, in that case
@@ -1454,7 +1470,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                                        }
                                                                }
                                                        };
-                                                       if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+                                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                                handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
                                                                continue;
                                                        }
@@ -1727,8 +1743,8 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                                                match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
                                                        Ok((msgs, monitor_option)) => {
-                                                               if let Some(chan_monitor) = monitor_option {
-                                                                       if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                                               if let Some(monitor_update) = monitor_option {
+                                                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                                                if was_frozen_for_monitor {
                                                                                        assert!(msgs.is_none());
                                                                                } else {
@@ -1771,103 +1787,98 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
        }
 
-       /// Used to restore channels to normal operation after a
+       /// Restores a single, given channel to normal operation after a
        /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
        /// operation.
-       pub fn test_restore_channel_monitor(&self) {
+       ///
+       /// All ChannelMonitor updates up to and including highest_applied_update_id must have been
+       /// fully committed in every copy of the given channels' ChannelMonitors.
+       ///
+       /// Note that calling this with a highest_applied_update_id other than that of the current
+       /// latest ChannelMonitorUpdate has no effect, and a single call to this function after
+       /// multiple ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id
+       /// argument exists largely to prevent races between this and concurrent update_monitor calls.
+       ///
+       /// Thus, the anticipated use is, at a high level:
+       ///  1) You register a ManyChannelMonitor with this ChannelManager,
+       ///  2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
+       ///     said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
+       ///     any time it cannot do so instantly,
+       ///  3) update(s) are applied to each remote copy of a ChannelMonitor,
+       ///  4) once all remote copies are updated, you call this function with the update_id that
+       ///     completed, and once it is the latest update the Channel will be re-enabled (a minimal
+       ///     sketch of this step follows below).
+       pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+               let _ = self.total_consistency_lock.read().unwrap();
+
                let mut close_results = Vec::new();
                let mut htlc_forwards = Vec::new();
                let mut htlc_failures = Vec::new();
                let mut pending_events = Vec::new();
-               let _ = self.total_consistency_lock.read().unwrap();
 
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let short_to_id = &mut channel_state.short_to_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       channel_state.by_id.retain(|_, channel| {
-                               if channel.is_awaiting_monitor_update() {
-                                       let chan_monitor = channel.channel_monitor().clone();
-                                       if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                                               match e {
-                                                       ChannelMonitorUpdateErr::PermanentFailure => {
-                                                               // TODO: There may be some pending HTLCs that we intended to fail
-                                                               // backwards when a monitor update failed. We should make sure
-                                                               // knowledge of those gets moved into the appropriate in-memory
-                                                               // ChannelMonitor and they get failed backwards once we get
-                                                               // on-chain confirmations.
-                                                               // Note I think #198 addresses this, so once it's merged a test
-                                                               // should be written.
-                                                               if let Some(short_id) = channel.get_short_channel_id() {
-                                                                       short_to_id.remove(&short_id);
-                                                               }
-                                                               close_results.push(channel.force_shutdown());
-                                                               if let Ok(update) = self.get_channel_update(&channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: update
-                                                                       });
-                                                               }
-                                                               false
-                                                       },
-                                                       ChannelMonitorUpdateErr::TemporaryFailure => true,
-                                               }
-                                       } else {
-                                               let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
-                                               if !pending_forwards.is_empty() {
-                                                       htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
-                                               }
-                                               htlc_failures.append(&mut pending_failures);
+                       let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
+                               Some(chan) => chan,
+                               None => return,
+                       };
+                       if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+                               return;
+                       }
 
-                                               macro_rules! handle_cs { () => {
-                                                       if let Some(update) = commitment_update {
-                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: channel.get_their_node_id(),
-                                                                       updates: update,
-                                                               });
-                                                       }
-                                               } }
-                                               macro_rules! handle_raa { () => {
-                                                       if let Some(revoke_and_ack) = raa {
-                                                               pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                                                       node_id: channel.get_their_node_id(),
-                                                                       msg: revoke_and_ack,
-                                                               });
-                                                       }
-                                               } }
-                                               match order {
-                                                       RAACommitmentOrder::CommitmentFirst => {
-                                                               handle_cs!();
-                                                               handle_raa!();
-                                                       },
-                                                       RAACommitmentOrder::RevokeAndACKFirst => {
-                                                               handle_raa!();
-                                                               handle_cs!();
-                                                       },
-                                               }
-                                               if needs_broadcast_safe {
-                                                       pending_events.push(events::Event::FundingBroadcastSafe {
-                                                               funding_txo: channel.get_funding_txo().unwrap(),
-                                                               user_channel_id: channel.get_user_id(),
-                                                       });
-                                               }
-                                               if let Some(msg) = funding_locked {
-                                                       pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
-                                                               node_id: channel.get_their_node_id(),
-                                                               msg,
-                                                       });
-                                                       if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
-                                                               pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-                                                                       node_id: channel.get_their_node_id(),
-                                                                       msg: announcement_sigs,
-                                                               });
-                                                       }
-                                                       short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
-                                               }
-                                               true
-                                       }
-                               } else { true }
-                       });
+                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
+                       if !pending_forwards.is_empty() {
+                               htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
+                       }
+                       htlc_failures.append(&mut pending_failures);
+
+                       macro_rules! handle_cs { () => {
+                               if let Some(update) = commitment_update {
+                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                               node_id: channel.get_their_node_id(),
+                                               updates: update,
+                                       });
+                               }
+                       } }
+                       macro_rules! handle_raa { () => {
+                               if let Some(revoke_and_ack) = raa {
+                                       pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+                                               node_id: channel.get_their_node_id(),
+                                               msg: revoke_and_ack,
+                                       });
+                               }
+                       } }
+                       match order {
+                               RAACommitmentOrder::CommitmentFirst => {
+                                       handle_cs!();
+                                       handle_raa!();
+                               },
+                               RAACommitmentOrder::RevokeAndACKFirst => {
+                                       handle_raa!();
+                                       handle_cs!();
+                               },
+                       }
+                       if needs_broadcast_safe {
+                               pending_events.push(events::Event::FundingBroadcastSafe {
+                                       funding_txo: channel.get_funding_txo().unwrap(),
+                                       user_channel_id: channel.get_user_id(),
+                               });
+                       }
+                       if let Some(msg) = funding_locked {
+                               pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+                                       node_id: channel.get_their_node_id(),
+                                       msg,
+                               });
+                               if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+                                       pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                                               node_id: channel.get_their_node_id(),
+                                               msg: announcement_sigs,
+                                       });
+                               }
+                               short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+                       }
                }
 
                self.pending_events.lock().unwrap().append(&mut pending_events);
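// Illustrative sketch (not part of the patch) of step 4 in the channel_monitor_updated docs
// above, assuming the items already in scope in this module. Once every copy of the
// ChannelMonitor for `funding_txo` has applied updates through `update_id` (the id of the
// ChannelMonitorUpdate whose persistence previously returned TemporaryFailure), the id is
// handed back so the channel is unfrozen. The function name is ours; only
// channel_monitor_updated itself is a real API.
fn on_all_copies_persisted<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref>
	(manager: &ChannelManager<ChanSigner, M, T, K, F>, funding_txo: OutPoint, update_id: u64)
	where M::Target: ManyChannelMonitor<ChanSigner>,
	      T::Target: BroadcasterInterface,
	      K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
	      F::Target: FeeEstimator,
{
	// This is a no-op unless update_id matches the channel's latest monitor update id, so it
	// is safe to call as each asynchronous persist completes.
	manager.channel_monitor_updated(&funding_txo, update_id);
}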
@@ -1887,7 +1898,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
                }
 
-               let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
+               let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
                        .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -1944,8 +1955,8 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
-               // lock before add_update_monitor
-               if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+               // lock before add_monitor
+               if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
                        match e {
                                ChannelMonitorUpdateErr::PermanentFailure => {
                                        // Note that we reply with the new channel_id in error messages if we gave up on the
@@ -1989,8 +2000,17 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        if chan.get().get_their_node_id() != *their_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
-                                       if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                       let monitor_update = match chan.get_mut().funding_signed(&msg) {
+                                               Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+                                               Err((Some(monitor_update), e)) => {
+                                                       assert!(chan.get().is_awaiting_monitor_update());
+                                                       let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                                       try_chan_entry!(self, Err(e), channel_state, chan);
+                                                       unreachable!();
+                                               },
+                                               Ok(update) => update,
+                                       };
+                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
                                        }
                                        (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
@@ -2047,7 +2067,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry);
+                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: their_node_id.clone(),
@@ -2093,7 +2113,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry);
+                                       let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = closing_signed {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
                                                        node_id: their_node_id.clone(),
@@ -2152,7 +2172,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        // If the update_add is completely bogus, the call will Err and we will close,
                                        // but if we've sent a shutdown and they haven't acknowledged it yet, we just
                                        // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                       if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+                                       if let PendingHTLCStatus::Forward(PendingHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
                                                let chan_update = self.get_channel_update(chan.get());
                                                pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                        channel_id: msg.channel_id,
@@ -2229,7 +2249,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
                                if (msg.failure_code & 0x8000) == 0 {
-                                       let chan_err: ChannelError<ChanSigner> = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
+                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
                                        try_chan_entry!(self, Err(chan_err), channel_state, chan);
                                }
                                try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
@@ -2247,9 +2267,18 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                if chan.get().get_their_node_id() != *their_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
-                                       try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
-                               if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                               let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
+                                               match chan.get_mut().commitment_signed(&msg, &self.fee_estimator) {
+                                       Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+                                       Err((Some(update), e)) => {
+                                               assert!(chan.get().is_awaiting_monitor_update());
+                                               let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), update);
+                                               try_chan_entry!(self, Err(e), channel_state, chan);
+                                               unreachable!();
+                                       },
+                                       Ok(res) => res
+                               };
+                               if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
                                        //TODO: Rebroadcast closing_signed if present on monitor update restoration
                                }
@@ -2283,7 +2312,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
        }
 
        #[inline]
-       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingHTLCInfo, u64)>)]) {
                for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut forward_event = None;
                        if !pending_forwards.is_empty() {
@@ -2324,9 +2353,9 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
-                                               try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
-                                       if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) =
+                                               try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator), channel_state, chan);
+                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
                                                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
                                                        return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
@@ -2367,7 +2396,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                if chan.get().get_their_node_id() != *their_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan);
+                               try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                }
@@ -2395,7 +2424,7 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
                                if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
                                                self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
-                                       let chan_err: ChannelError<ChanSigner> = ChannelError::Close("Bad announcement_signatures node_signature");
+                                       let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature");
                                        try_chan_entry!(self, Err(chan_err), channel_state, chan);
                                }
 
@@ -2426,10 +2455,10 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                if chan.get().get_their_node_id() != *their_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
+                               let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
                                        try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
-                               if let Some(monitor) = channel_monitor {
-                                       if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+                               if let Some(monitor_update) = monitor_update_opt {
+                                       if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                // channel_reestablish doesn't guarantee the order it returns is sensical
                                                // for the messages it returns, but if we're setting what messages to
                                                // re-transmit on monitor update success, we need to make sure it is sane.
@@ -2512,10 +2541,10 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
                                                return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
                                        }
                                        their_node_id = chan.get().get_their_node_id();
-                                       if let Some((update_fee, commitment_signed, chan_monitor)) =
+                                       if let Some((update_fee, commitment_signed, monitor_update)) =
                                                        break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan)
                                        {
-                                               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                               if let Err(_e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        unimplemented!();
                                                }
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2542,14 +2571,19 @@ impl<ChanSigner: ChannelKeys, M: Deref> ChannelManager<ChanSigner, M> where M::T
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
                // TODO: Event release to users and serialization is currently race-y: it's very easy for a
                // user to serialize a ChannelManager with pending events in it and lose those events on
                // restart. This is doubly true for the fail/fulfill-backs from monitor events!
                {
                        //TODO: This behavior should be documented.
-                       for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+                       for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
                                if let Some(preimage) = htlc_update.payment_preimage {
                                        log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
@@ -2567,14 +2601,19 @@ impl<ChanSigner: ChannelKeys, M: Deref> events::MessageSendEventsProvider for Ch
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref> events::EventsProvider for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> events::EventsProvider for ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
                // TODO: Event release to users and serialization is currently race-y: it's very easy for a
                // user to serialize a ChannelManager with pending events in it and lose those events on
                // restart. This is doubly true for the fail/fulfill-backs from monitor events!
                {
                        //TODO: This behavior should be documented.
-                       for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+                       for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
                                if let Some(preimage) = htlc_update.payment_preimage {
                                        log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
@@ -2592,7 +2631,13 @@ impl<ChanSigner: ChannelKeys, M: Deref> events::EventsProvider for ChannelManage
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChainListener for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send>
+       ChainListener for ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
                let header_hash = header.bitcoin_hash();
                log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
@@ -2709,7 +2754,13 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChainListener for ChannelM
        }
 }
 
-impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChannelMessageHandler for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send>
+       ChannelMessageHandler for ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
                let _ = self.total_consistency_lock.read().unwrap();
                let res = self.internal_open_channel(their_node_id, their_features, msg);
@@ -2998,7 +3049,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send> ChannelMessageHandler for
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-impl Writeable for PendingForwardHTLCInfo {
+impl Writeable for PendingHTLCInfo {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                self.onion_packet.write(writer)?;
                self.incoming_shared_secret.write(writer)?;
@@ -3010,9 +3061,9 @@ impl Writeable for PendingForwardHTLCInfo {
        }
 }
 
-impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
-       fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
-               Ok(PendingForwardHTLCInfo {
+impl<R: ::std::io::Read> Readable<R> for PendingHTLCInfo {
+       fn read(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
+               Ok(PendingHTLCInfo {
                        onion_packet: Readable::read(reader)?,
                        incoming_shared_secret: Readable::read(reader)?,
                        payment_hash: Readable::read(reader)?,
@@ -3179,7 +3230,12 @@ impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo {
        }
 }
 
-impl<ChanSigner: ChannelKeys + Writeable, M: Deref> Writeable for ChannelManager<ChanSigner, M> where M::Target: ManyChannelMonitor<ChanSigner> {
+impl<ChanSigner: ChannelKeys + Writeable, M: Deref, T: Deref, K: Deref, F: Deref> Writeable for ChannelManager<ChanSigner, M, T, K, F>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
                let _ = self.total_consistency_lock.write().unwrap();
 
@@ -3250,15 +3306,21 @@ impl<ChanSigner: ChannelKeys + Writeable, M: Deref> Writeable for ChannelManager
 /// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
 /// 6) Disconnect/connect blocks on the ChannelManager.
 /// 7) Register the new ChannelManager with your ChainWatchInterface.
-pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref> where M::Target: ManyChannelMonitor<ChanSigner> {
+pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref>
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
+
        /// The keys provider which will give us relevant keys. Some keys will be loaded during
        /// deserialization.
-       pub keys_manager: Arc<KeysInterface<ChanKeySigner = ChanSigner>>,
+       pub keys_manager: K,
 
        /// The fee_estimator for use in the ChannelManager in the future.
        ///
        /// No calls to the FeeEstimator will be made during deserialization.
-       pub fee_estimator: Arc<FeeEstimator>,
+       pub fee_estimator: F,
        /// The ManyChannelMonitor for use in the ChannelManager in the future.
        ///
        /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
@@ -3269,7 +3331,7 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref> wh
        /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
        /// used to broadcast the latest local commitment transactions of channels which must be
        /// force-closed during deserialization.
-       pub tx_broadcaster: Arc<BroadcasterInterface>,
+       pub tx_broadcaster: T,
        /// The Logger for use in the ChannelManager and which may be used to log information during
        /// deserialization.
        pub logger: Arc<Logger>,
@@ -3290,8 +3352,14 @@ pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref> wh
        pub channel_monitors: &'a mut HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>,
 }
 
-impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref> ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner, M>> for (Sha256dHash, ChannelManager<ChanSigner, M>) where M::Target: ManyChannelMonitor<ChanSigner> {
-       fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M>) -> Result<Self, DecodeError> {
+impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref, T: Deref, K: Deref, F: Deref>
+       ReadableArgs<R, ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>> for (Sha256dHash, ChannelManager<ChanSigner, M, T, K, F>)
+       where M::Target: ManyChannelMonitor<ChanSigner>,
+        T::Target: BroadcasterInterface,
+        K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+        F::Target: FeeEstimator,
+{
+       fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F>) -> Result<Self, DecodeError> {
                let _ver: u8 = Readable::read(reader)?;
                let min_ver: u8 = Readable::read(reader)?;
                if min_ver > SERIALIZATION_VERSION {
@@ -3314,12 +3382,13 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>, M: Deref> R
                                return Err(DecodeError::InvalidValue);
                        }
 
-                       let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                       let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
                                                channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
-                                               channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
+                                               channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
+                                               channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
                                        let mut force_close_res = channel.force_shutdown();
                                        force_close_res.0 = monitor.get_latest_local_commitment_txn();
                                        closed_channels.push(force_close_res);
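// Illustrative sketch (not part of the patch): with the new generic parameters, both the read
// args and the resulting ChannelManager are parameterized over anything that Derefs to the
// required traits, so plain Arcs still satisfy them. `MyMonitor` is an assumed user type
// implementing ManyChannelMonitor<InMemoryChannelKeys>, and we assume KeysManager's
// ChanKeySigner is InMemoryChannelKeys and that the chosen ChanSigner is Readable, as the
// impl above requires.
fn reload_manager<'a, R: ::std::io::Read>(reader: &mut R,
	args: ChannelManagerReadArgs<'a, InMemoryChannelKeys, Arc<MyMonitor>, Arc<BroadcasterInterface>, Arc<KeysManager>, Arc<FeeEstimator>>)
	-> Result<(Sha256dHash, ChannelManager<InMemoryChannelKeys, Arc<MyMonitor>, Arc<BroadcasterInterface>, Arc<KeysManager>, Arc<FeeEstimator>>), DecodeError>
{
	// Channels whose latest monitor update id (or commitment state) is ahead of the provided
	// ChannelMonitor copy are force-closed during this read, per the checks above.
	ReadableArgs::read(reader, args)
}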
index 68674ac3ae098c3db7d70a173755e44232c77c44..1bc8c76b49e82765f73e550c41e17c3243a027fc 100644 (file)
@@ -31,18 +31,58 @@ use secp256k1;
 
 use ln::msgs::DecodeError;
 use ln::chan_utils;
-use ln::chan_utils::{HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
 use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
 use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
 use chain::transaction::OutPoint;
 use chain::keysinterface::{SpendableOutputDescriptor, ChannelKeys};
 use util::logger::Logger;
-use util::ser::{ReadableArgs, Readable, Writer, Writeable, U48};
+use util::ser::{ReadableArgs, Readable, MaybeReadable, Writer, Writeable, U48};
 use util::{byte_utils, events};
 
 use std::collections::{HashMap, hash_map, HashSet};
 use std::sync::{Arc,Mutex};
 use std::{hash,cmp, mem};
+use std::ops::Deref;
+
+/// An update generated by the underlying Channel itself which contains some new information the
+/// ChannelMonitor should be made aware of.
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+#[must_use]
+pub struct ChannelMonitorUpdate {
+       pub(super) updates: Vec<ChannelMonitorUpdateStep>,
+       /// The sequence number of this update. Updates *must* be replayed in-order according to this
+       /// sequence number (and updates may panic if they are not). The update_id values are strictly
+       /// increasing and increase by one for each new update.
+       ///
+       /// This sequence number is also used to track up to which points updates which returned
+       /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
+       /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+       pub update_id: u64,
+}
+
+impl Writeable for ChannelMonitorUpdate {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+               self.update_id.write(w)?;
+               (self.updates.len() as u64).write(w)?;
+               for update_step in self.updates.iter() {
+                       update_step.write(w)?;
+               }
+               Ok(())
+       }
+}
+impl<R: ::std::io::Read> Readable<R> for ChannelMonitorUpdate {
+       fn read(r: &mut R) -> Result<Self, DecodeError> {
+               let update_id: u64 = Readable::read(r)?;
+               let len: u64 = Readable::read(r)?;
+               let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<ChannelMonitorUpdateStep>()));
+               for _ in 0..len {
+                       updates.push(Readable::read(r)?);
+               }
+               Ok(Self { update_id, updates })
+       }
+}
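// Illustrative sketch (not part of the patch): because update_id is strictly increasing and
// updates must be replayed in order, the journaling approach mentioned in the
// TemporaryFailure docs below can be as simple as one file per update. The directory layout
// and file naming here are assumptions, not crate conventions; encode() is the helper
// provided by util::ser::Writeable.
fn journal_update(dir: &::std::path::Path, funding_txo: &OutPoint, update: &ChannelMonitorUpdate) -> Result<(), ::std::io::Error> {
	let name = format!("{}_{}.update{}", funding_txo.txid, funding_txo.index, update.update_id);
	::std::fs::write(dir.join(name), &update.encode())
}
// At reload time, read the files back in update_id order, deserialize each with
// ChannelMonitorUpdate::read, and apply them to the freshly-deserialized ChannelMonitor
// before handing it to the ChannelManager.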
 
 /// An error enum representing a failure to persist a channel monitor update.
 #[derive(Clone)]
@@ -51,13 +91,13 @@ pub enum ChannelMonitorUpdateErr {
        /// our state failed, but is expected to succeed at some point in the future).
        ///
        /// Such a failure will "freeze" a channel, preventing us from revoking old states or
-       /// submitting new commitment transactions to the remote party.
-       /// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
-       /// the channel to an operational state.
+       /// submitting new commitment transactions to the remote party. Once the update(s) which failed
+       /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
+       /// restore the channel to an operational state.
        ///
-       /// Note that continuing to operate when no copy of the updated ChannelMonitor could be
-       /// persisted is unsafe - if you failed to store the update on your own local disk you should
-       /// instead return PermanentFailure to force closure of the channel ASAP.
+       /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
+       /// you return a TemporaryFailure you must ensure that it is written to disk safely before
+       /// writing out the latest ChannelManager state.
        ///
        /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
        /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
@@ -68,8 +108,15 @@ pub enum ChannelMonitorUpdateErr {
        /// been "frozen".
        ///
        /// Note that even if updates made after TemporaryFailure succeed you must still call
-       /// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
-       /// channel operation.
+       /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
+       /// operation.
+       ///
+       /// Note that the update being processed here will not be replayed for you when you call
+       /// ChannelManager::channel_monitor_updated, so you must store the update itself along
+       /// with the persisted ChannelMonitor on your own local disk prior to returning a
+       /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
+       /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
+       /// reload-time.
        ///
        /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
        /// remote location (with local copies persisted immediately), it is anticipated that all
@@ -84,24 +131,26 @@ pub enum ChannelMonitorUpdateErr {
 }
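// Illustrative sketch (not part of the patch) of how an implementation might map persistence
// outcomes onto this enum: a failed local write is unrecoverable (PermanentFailure, forcing
// the channel closed), while a still-pending remote/async write is TemporaryFailure and is
// later resolved via ChannelManager::channel_monitor_updated. The two booleans stand in for
// assumed helpers' results, not crate APIs.
fn persist_result(local_write_ok: bool, remote_copies_pending: bool) -> Result<(), ChannelMonitorUpdateErr> {
	if !local_write_ok {
		return Err(ChannelMonitorUpdateErr::PermanentFailure);
	}
	if remote_copies_pending {
		// The caller must keep the ChannelMonitorUpdate itself around (or journaled): it will
		// not be replayed when channel_monitor_updated is eventually called.
		return Err(ChannelMonitorUpdateErr::TemporaryFailure);
	}
	Ok(())
}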
 
 /// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
-/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::insert_combine this
-/// means you tried to merge two monitors for different channels or for a channel which was
-/// restored from a backup and then generated new commitment updates.
+/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
+/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
+/// corrupted.
 /// Contains a human-readable error message.
 #[derive(Debug)]
 pub struct MonitorUpdateError(pub &'static str);
 
 /// Simple structure sent back by ManyChannelMonitor when an HTLC from a forward channel is
 /// detected onchain, carrying the info needed to update the HTLC in the backward channel.
+#[derive(Clone, PartialEq)]
 pub struct HTLCUpdate {
        pub(super) payment_hash: PaymentHash,
        pub(super) payment_preimage: Option<PaymentPreimage>,
        pub(super) source: HTLCSource
 }
+impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
 
 /// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
 /// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
-/// events to it, while also taking any add_update_monitor events and passing them to some remote
+/// events to it, while also taking any add/update_monitor events and passing them to some remote
 /// server(s).
 ///
 /// Note that any updates to a channel's monitor *must* be applied to each instance of the
@@ -115,7 +164,7 @@ pub struct HTLCUpdate {
 /// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
 /// all registered listeners in one go.
 pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
-       /// Adds or updates a monitor for the given `funding_txo`.
+       /// Adds a monitor for the given `funding_txo`.
        ///
        /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
        /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
@@ -126,12 +175,30 @@ pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
        /// any spends of any of the outputs.
        ///
        /// Any spends of outputs which should have been registered which aren't passed to
-       /// ChannelMonitors via block_connected may result in funds loss.
-       fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+       /// ChannelMonitors via block_connected may result in FUNDS LOSS.
+       fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+
+       /// Updates a monitor for the given `funding_txo`.
+       ///
+       /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
+       /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
+       /// callbacks with the funding transaction, or any spends of it.
+       ///
+       /// Further, the implementer must also ensure that each output returned in
+       /// monitor.get_watch_outputs() is registered to ensure that the provided monitor learns about
+       /// any spends of any of the outputs.
+       ///
+       /// Any spends of outputs which should have been registered which aren't passed to
+       /// ChannelMonitors via block_connected may result in FUNDS LOSS.
+       fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
 
        /// Used by ChannelManager to get the list of HTLCs resolved onchain which need to be updated
-       /// with success or failure backward
-       fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate>;
+       /// with success or failure.
+       ///
+       /// You should probably just call through to
+       /// ChannelMonitor::get_and_clear_pending_htlcs_updated() for each ChannelMonitor and return
+       /// the full list.
+       fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate>;
 }
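// Illustrative sketch (not part of the patch): the "call through for each ChannelMonitor"
// pattern suggested by the get_and_clear_pending_htlcs_updated docs above, for an
// implementation keeping its monitors behind a Mutex<HashMap<..>> as SimpleManyChannelMonitor
// does. The per-monitor method's exact receiver (&mut self) is assumed here.
fn collect_htlc_updates<ChanSigner: ChannelKeys>(monitors: &Mutex<HashMap<OutPoint, ChannelMonitor<ChanSigner>>>) -> Vec<HTLCUpdate> {
	let mut pending = Vec::new();
	let mut mons = monitors.lock().unwrap();
	for monitor in mons.values_mut() {
		pending.append(&mut monitor.get_and_clear_pending_htlcs_updated());
	}
	pending
}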
 
 /// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
@@ -145,72 +212,39 @@ pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
 ///
 /// If you're using this for local monitoring of your own channels, you probably want to use
 /// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
-pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys> {
+pub struct SimpleManyChannelMonitor<Key, ChanSigner: ChannelKeys, T: Deref, F: Deref>
+       where T::Target: BroadcasterInterface,
+        F::Target: FeeEstimator
+{
        #[cfg(test)] // Used in ChannelManager tests to manipulate channels directly
        pub monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
        #[cfg(not(test))]
        monitors: Mutex<HashMap<Key, ChannelMonitor<ChanSigner>>>,
        chain_monitor: Arc<ChainWatchInterface>,
-       broadcaster: Arc<BroadcasterInterface>,
-       pending_events: Mutex<Vec<events::Event>>,
-       pending_htlc_updated: Mutex<HashMap<PaymentHash, Vec<(HTLCSource, Option<PaymentPreimage>)>>>,
+       broadcaster: T,
        logger: Arc<Logger>,
-       fee_estimator: Arc<FeeEstimator>
+       fee_estimator: F
 }
 
-impl<'a, Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys> ChainListener for SimpleManyChannelMonitor<Key, ChanSigner> {
+impl<'a, Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send>
+       ChainListener for SimpleManyChannelMonitor<Key, ChanSigner, T, F>
+       where T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator
+{
        fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) {
                let block_hash = header.bitcoin_hash();
-               let mut new_events: Vec<events::Event> = Vec::with_capacity(0);
-               let mut htlc_updated_infos = Vec::new();
                {
                        let mut monitors = self.monitors.lock().unwrap();
                        for monitor in monitors.values_mut() {
-                               let (txn_outputs, spendable_outputs, mut htlc_updated) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
-                               if spendable_outputs.len() > 0 {
-                                       new_events.push(events::Event::SpendableOutputs {
-                                               outputs: spendable_outputs,
-                                       });
-                               }
+                               let txn_outputs = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
 
                                for (ref txid, ref outputs) in txn_outputs {
                                        for (idx, output) in outputs.iter().enumerate() {
                                                self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
                                        }
                                }
-                               htlc_updated_infos.append(&mut htlc_updated);
                        }
                }
-               {
-                       // ChannelManager will just need to fetch pending_htlc_updated and pass state backward
-                       let mut pending_htlc_updated = self.pending_htlc_updated.lock().unwrap();
-                       for htlc in htlc_updated_infos.drain(..) {
-                               match pending_htlc_updated.entry(htlc.2) {
-                                       hash_map::Entry::Occupied(mut e) => {
-                                               // In case of reorg we may have htlc outputs solved in a different way so
-                                               // we prefer to keep claims but don't store duplicate updates for a given
-                                               // (payment_hash, HTLCSource) pair.
-                                               let mut existing_claim = false;
-                                               e.get_mut().retain(|htlc_data| {
-                                                       if htlc.0 == htlc_data.0 {
-                                                               if htlc_data.1.is_some() {
-                                                                       existing_claim = true;
-                                                                       true
-                                                               } else { false }
-                                                       } else { true }
-                                               });
-                                               if !existing_claim {
-                                                       e.get_mut().push((htlc.0, htlc.1));
-                                               }
-                                       }
-                                       hash_map::Entry::Vacant(e) => {
-                                               e.insert(vec![(htlc.0, htlc.1)]);
-                                       }
-                               }
-                       }
-               }
-               let mut pending_events = self.pending_events.lock().unwrap();
-               pending_events.append(&mut new_events);
        }
 
        fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
@@ -222,16 +256,17 @@ impl<'a, Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys> ChainListen
        }
 }
 
-impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys> SimpleManyChannelMonitor<Key, ChanSigner> {
+impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: Deref, F: Deref> SimpleManyChannelMonitor<Key, ChanSigner, T, F>
+       where T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator
+{
        /// Creates a new object which can be used to monitor several channels given the chain
        /// interface with which to register to receive notifications.
-       pub fn new(chain_monitor: Arc<ChainWatchInterface>, broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<FeeEstimator>) -> SimpleManyChannelMonitor<Key, ChanSigner> {
+       pub fn new(chain_monitor: Arc<ChainWatchInterface>, broadcaster: T, logger: Arc<Logger>, feeest: F) -> SimpleManyChannelMonitor<Key, ChanSigner, T, F> {
                let res = SimpleManyChannelMonitor {
                        monitors: Mutex::new(HashMap::new()),
                        chain_monitor,
                        broadcaster,
-                       pending_events: Mutex::new(Vec::new()),
-                       pending_htlc_updated: Mutex::new(HashMap::new()),
                        logger,
                        fee_estimator: feeest,
                };
@@ -240,14 +275,11 @@ impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys> Simpl
        }
 
        /// Adds or updates the monitor which monitors the channel referred to by the given key.
-       pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
+       pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
                let mut monitors = self.monitors.lock().unwrap();
-               match monitors.get_mut(&key) {
-                       Some(orig_monitor) => {
-                               log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(monitor.key_storage));
-                               return orig_monitor.insert_combine(monitor);
-                       },
-                       None => {}
+               let entry = match monitors.entry(key) {
+                       hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")),
+                       hash_map::Entry::Vacant(e) => e,
                };
                match monitor.key_storage {
                        Storage::Local { ref funding_info, .. } => {
@@ -271,41 +303,60 @@ impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys> Simpl
                                self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script);
                        }
                }
-               monitors.insert(key, monitor);
+               entry.insert(monitor);
                Ok(())
        }
+
+       /// Updates the monitor which monitors the channel referred to by the given key.
+       pub fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+               let mut monitors = self.monitors.lock().unwrap();
+               match monitors.get_mut(&key) {
+                       Some(orig_monitor) => {
+                               log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor.key_storage));
+                               orig_monitor.update_monitor(update)
+                       },
+                       None => Err(MonitorUpdateError("No such monitor registered"))
+               }
+       }
 }
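
The old insert-or-combine behavior is now split into a strict pair: adding a monitor for a key that already exists is an error, and updates only apply to an already-registered monitor. A rough, self-contained sketch of that split, using hypothetical `Mon`/`MonitorUpdateErr` stand-in types rather than the crate's:

    use std::collections::hash_map::{Entry, HashMap};

    struct MonitorUpdateErr(&'static str);
    struct Mon { latest_update_id: u64 }

    struct Monitors { map: HashMap<u32, Mon> }

    impl Monitors {
        // Adding twice for the same key is an error instead of a merge.
        fn add(&mut self, key: u32, mon: Mon) -> Result<(), MonitorUpdateErr> {
            match self.map.entry(key) {
                Entry::Occupied(_) => Err(MonitorUpdateErr("monitor for key already present")),
                Entry::Vacant(e) => { e.insert(mon); Ok(()) }
            }
        }

        // Updates must target an already-registered monitor.
        fn update(&mut self, key: u32, new_id: u64) -> Result<(), MonitorUpdateErr> {
            match self.map.get_mut(&key) {
                Some(mon) => { mon.latest_update_id = new_id; Ok(()) }
                None => Err(MonitorUpdateErr("no such monitor registered")),
            }
        }
    }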
 
-impl<ChanSigner: ChannelKeys> ManyChannelMonitor<ChanSigner> for SimpleManyChannelMonitor<OutPoint, ChanSigner> {
-       fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
-               match self.add_update_monitor_by_key(funding_txo, monitor) {
+impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send, F: Deref + Sync + Send> ManyChannelMonitor<ChanSigner> for SimpleManyChannelMonitor<OutPoint, ChanSigner, T, F>
+       where T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator
+{
+       fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+               match self.add_monitor_by_key(funding_txo, monitor) {
                        Ok(_) => Ok(()),
                        Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
                }
        }
 
-       fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
-               let mut updated = self.pending_htlc_updated.lock().unwrap();
-               let mut pending_htlcs_updated = Vec::with_capacity(updated.len());
-               for (k, v) in updated.drain() {
-                       for htlc_data in v {
-                               pending_htlcs_updated.push(HTLCUpdate {
-                                       payment_hash: k,
-                                       payment_preimage: htlc_data.1,
-                                       source: htlc_data.0,
-                               });
-                       }
+       fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+               match self.update_monitor_by_key(funding_txo, update) {
+                       Ok(_) => Ok(()),
+                       Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
+               }
+       }
+
+       fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+               let mut pending_htlcs_updated = Vec::new();
+               for chan in self.monitors.lock().unwrap().values_mut() {
+                       pending_htlcs_updated.append(&mut chan.get_and_clear_pending_htlcs_updated());
                }
                pending_htlcs_updated
        }
 }
 
-impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner> {
+impl<Key : Send + cmp::Eq + hash::Hash, ChanSigner: ChannelKeys, T: Deref, F: Deref> events::EventsProvider for SimpleManyChannelMonitor<Key, ChanSigner, T, F>
+       where T::Target: BroadcasterInterface,
+             F::Target: FeeEstimator
+{
        fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-               let mut pending_events = self.pending_events.lock().unwrap();
-               let mut ret = Vec::new();
-               mem::swap(&mut ret, &mut *pending_events);
-               ret
+               let mut pending_events = Vec::new();
+               for chan in self.monitors.lock().unwrap().values_mut() {
+                       pending_events.append(&mut chan.get_and_clear_pending_events());
+               }
+               pending_events
        }
 }
 
@@ -338,7 +389,6 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
 /// keeping bumping another claim tx to solve the outpoint.
 pub(crate) const ANTI_REORG_DELAY: u32 = 6;
 
-#[derive(Clone)]
 enum Storage<ChanSigner: ChannelKeys> {
        Local {
                keys: ChanSigner,
@@ -593,13 +643,153 @@ impl<R: ::std::io::Read> Readable<R> for ClaimTxBumpMaterial {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+pub(super) enum ChannelMonitorUpdateStep {
+       LatestLocalCommitmentTXInfo {
+               // TODO: We really need to not be generating a fully-signed transaction in Channel and
+               // passing it here, we need to hold off so that the ChanSigner can enforce a
+               // only-sign-local-state-for-broadcast once invariant:
+               commitment_tx: LocalCommitmentTransaction,
+               local_keys: chan_utils::TxCreationKeys,
+               feerate_per_kw: u64,
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+       },
+       LatestRemoteCommitmentTXInfo {
+               unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here
+               htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+               commitment_number: u64,
+               their_revocation_point: PublicKey,
+       },
+       PaymentPreimage {
+               payment_preimage: PaymentPreimage,
+       },
+       CommitmentSecret {
+               idx: u64,
+               secret: [u8; 32],
+       },
+       /// Indicates that our channel is likely a stale version; we're closing, but this update
+       /// should still allow us to spend what is ours if our counterparty broadcasts their
+       /// latest state.
+       RescueRemoteCommitmentTXInfo {
+               their_current_per_commitment_point: PublicKey,
+       },
+}
+
+impl Writeable for ChannelMonitorUpdateStep {
+       fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+               match self {
+                       &ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { ref commitment_tx, ref local_keys, ref feerate_per_kw, ref htlc_outputs } => {
+                               0u8.write(w)?;
+                               commitment_tx.write(w)?;
+                               local_keys.write(w)?;
+                               feerate_per_kw.write(w)?;
+                               (htlc_outputs.len() as u64).write(w)?;
+                               for &(ref output, ref signature, ref source) in htlc_outputs.iter() {
+                                       output.write(w)?;
+                                       signature.write(w)?;
+                                       source.write(w)?;
+                               }
+                       }
+                       &ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => {
+                               1u8.write(w)?;
+                               unsigned_commitment_tx.write(w)?;
+                               commitment_number.write(w)?;
+                               their_revocation_point.write(w)?;
+                               (htlc_outputs.len() as u64).write(w)?;
+                               for &(ref output, ref source) in htlc_outputs.iter() {
+                                       output.write(w)?;
+                                       match source {
+                                               &None => 0u8.write(w)?,
+                                               &Some(ref s) => {
+                                                       1u8.write(w)?;
+                                                       s.write(w)?;
+                                               },
+                                       }
+                               }
+                       },
+                       &ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => {
+                               2u8.write(w)?;
+                               payment_preimage.write(w)?;
+                       },
+                       &ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => {
+                               3u8.write(w)?;
+                               idx.write(w)?;
+                               secret.write(w)?;
+                       },
+                       &ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { ref their_current_per_commitment_point } => {
+                               4u8.write(w)?;
+                               their_current_per_commitment_point.write(w)?;
+                       },
+               }
+               Ok(())
+       }
+}
+impl<R: ::std::io::Read> Readable<R> for ChannelMonitorUpdateStep {
+       fn read(r: &mut R) -> Result<Self, DecodeError> {
+               match Readable::read(r)? {
+                       0u8 => {
+                               Ok(ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+                                       commitment_tx: Readable::read(r)?,
+                                       local_keys: Readable::read(r)?,
+                                       feerate_per_kw: Readable::read(r)?,
+                                       htlc_outputs: {
+                                               let len: u64 = Readable::read(r)?;
+                                               let mut res = Vec::new();
+                                               for _ in 0..len {
+                                                       res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?));
+                                               }
+                                               res
+                                       },
+                               })
+                       },
+                       1u8 => {
+                               Ok(ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo {
+                                       unsigned_commitment_tx: Readable::read(r)?,
+                                       commitment_number: Readable::read(r)?,
+                                       their_revocation_point: Readable::read(r)?,
+                                       htlc_outputs: {
+                                               let len: u64 = Readable::read(r)?;
+                                               let mut res = Vec::new();
+                                               for _ in 0..len {
+                                                       res.push((Readable::read(r)?, <Option<HTLCSource> as Readable<R>>::read(r)?.map(|o| Box::new(o))));
+                                               }
+                                               res
+                                       },
+                               })
+                       },
+                       2u8 => {
+                               Ok(ChannelMonitorUpdateStep::PaymentPreimage {
+                                       payment_preimage: Readable::read(r)?,
+                               })
+                       },
+                       3u8 => {
+                               Ok(ChannelMonitorUpdateStep::CommitmentSecret {
+                                       idx: Readable::read(r)?,
+                                       secret: Readable::read(r)?,
+                               })
+                       },
+                       4u8 => {
+                               Ok(ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo {
+                                       their_current_per_commitment_point: Readable::read(r)?,
+                               })
+                       },
+                       _ => Err(DecodeError::InvalidValue),
+               }
+       }
+}
+
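The Writeable/Readable pair above uses a simple tag-byte scheme: one discriminant byte per variant, followed by the variant's fields and length-prefixed lists, with unknown tags rejected on read. A minimal stand-alone sketch of the same idea (a toy `Step` enum over raw byte buffers, not the crate's serialization framework):

    enum Step {
        Preimage([u8; 32]),                     // corresponds to tag 2 above
        Secret { idx: u64, secret: [u8; 32] },  // corresponds to tag 3 above
    }

    fn write_step(step: &Step, out: &mut Vec<u8>) {
        match step {
            Step::Preimage(p) => { out.push(2); out.extend_from_slice(p); }
            Step::Secret { idx, secret } => {
                out.push(3);
                out.extend_from_slice(&idx.to_be_bytes());
                out.extend_from_slice(secret);
            }
        }
    }

    fn read_step(buf: &[u8]) -> Option<Step> {
        let (tag, rest) = buf.split_first()?;
        match *tag {
            2 if rest.len() >= 32 => {
                let mut p = [0u8; 32];
                p.copy_from_slice(&rest[..32]);
                Some(Step::Preimage(p))
            }
            3 if rest.len() >= 40 => {
                let mut idx = [0u8; 8];
                idx.copy_from_slice(&rest[..8]);
                let mut secret = [0u8; 32];
                secret.copy_from_slice(&rest[8..40]);
                Some(Step::Secret { idx: u64::from_be_bytes(idx), secret })
            }
            // Unknown tags are rejected, mirroring DecodeError::InvalidValue above.
            _ => None,
        }
    }
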
 /// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
 /// on-chain transactions to ensure no loss of funds occurs.
 ///
 /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
 /// information and are actively monitoring the chain.
-#[derive(Clone)]
+///
+/// Pending Events or updated HTLCs which have not yet been read out by
+/// get_and_clear_pending_htlcs_updated or get_and_clear_pending_events are serialized to disk and
+/// reloaded at deserialize-time. Thus, you must ensure that all events retrieved from these
+/// methods are fully handled before re-serializing the new state.
 pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
+       latest_update_id: u64,
        commitment_transaction_number_obscure_factor: u64,
 
        key_storage: Storage<ChanSigner>,
@@ -613,7 +803,7 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
        our_to_self_delay: u16,
        their_to_self_delay: Option<u16>,
 
-       old_secrets: [([u8; 32], u64); 49],
+       commitment_secrets: CounterpartyCommitmentSecrets,
        remote_claimable_outpoints: HashMap<Sha256dHash, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
        /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
        /// Nor can we figure out their commitment numbers without the commitment transaction they are
@@ -640,6 +830,9 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
 
        payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
 
+       pending_htlcs_updated: Vec<HTLCUpdate>,
+       pending_events: Vec<events::Event>,
+
        destination_script: Script,
        // Thanks to data loss protection, we may be able to claim our non-htlc funds
        // back, this is the script we have to spend from but we need to
@@ -686,14 +879,13 @@ pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
 
        // We simply modify last_block_hash in Channel's block_connected so that serialization is
        // consistent but hopefully the users' copy handles block_connected in a consistent way.
-       // (we do *not*, however, update them in insert_combine to ensure any local user copies keep
+       // (we do *not*, however, update them in update_monitor to ensure any local user copies keep
        // their last_block_hash from its state and not based on updated copies that didn't run through
        // the full block_connected).
        pub(crate) last_block_hash: Sha256dHash,
        secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
        logger: Arc<Logger>,
 }
-
 macro_rules! subtract_high_prio_fee {
        ($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
                {
@@ -734,7 +926,8 @@ macro_rules! subtract_high_prio_fee {
 /// underlying object
 impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
        fn eq(&self, other: &Self) -> bool {
-               if self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
+               if self.latest_update_id != other.latest_update_id ||
+                       self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
                        self.key_storage != other.key_storage ||
                        self.their_htlc_base_key != other.their_htlc_base_key ||
                        self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
@@ -743,6 +936,7 @@ impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
                        self.their_cur_revocation_points != other.their_cur_revocation_points ||
                        self.our_to_self_delay != other.our_to_self_delay ||
                        self.their_to_self_delay != other.their_to_self_delay ||
+                       self.commitment_secrets != other.commitment_secrets ||
                        self.remote_claimable_outpoints != other.remote_claimable_outpoints ||
                        self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain ||
                        self.remote_hash_commitment_number != other.remote_hash_commitment_number ||
@@ -750,6 +944,8 @@ impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
                        self.current_remote_commitment_number != other.current_remote_commitment_number ||
                        self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
                        self.payment_preimages != other.payment_preimages ||
+                       self.pending_htlcs_updated != other.pending_htlcs_updated ||
+                       self.pending_events.len() != other.pending_events.len() || // We trust events to round-trip properly
                        self.destination_script != other.destination_script ||
                        self.to_remote_rescue != other.to_remote_rescue ||
                        self.pending_claim_requests != other.pending_claim_requests ||
@@ -759,11 +955,6 @@ impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
                {
                        false
                } else {
-                       for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
-                               if secret != o_secret || idx != o_idx {
-                                       return false
-                               }
-                       }
                        true
                }
        }
@@ -777,6 +968,8 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
                writer.write_all(&[SERIALIZATION_VERSION; 1])?;
                writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
 
+               self.latest_update_id.write(writer)?;
+
                // Set in initial Channel-object creation, so should always be set by now:
                U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
 
@@ -844,10 +1037,7 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
                writer.write_all(&byte_utils::be16_to_array(self.our_to_self_delay))?;
                writer.write_all(&byte_utils::be16_to_array(self.their_to_self_delay.unwrap()))?;
 
-               for &(ref secret, ref idx) in self.old_secrets.iter() {
-                       writer.write_all(secret)?;
-                       writer.write_all(&byte_utils::be64_to_array(*idx))?;
-               }
+               self.commitment_secrets.write(writer)?;
 
                macro_rules! serialize_htlc_in_commitment {
                        ($htlc_output: expr) => {
@@ -938,6 +1128,16 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
                        writer.write_all(&payment_preimage.0[..])?;
                }
 
+               writer.write_all(&byte_utils::be64_to_array(self.pending_htlcs_updated.len() as u64))?;
+               for data in self.pending_htlcs_updated.iter() {
+                       data.write(writer)?;
+               }
+
+               writer.write_all(&byte_utils::be64_to_array(self.pending_events.len() as u64))?;
+               for event in self.pending_events.iter() {
+                       event.write(writer)?;
+               }
+
                self.last_block_hash.write(writer)?;
                self.destination_script.write(writer)?;
                if let Some((ref to_remote_script, ref local_key)) = self.to_remote_rescue {
@@ -1021,32 +1221,45 @@ impl<ChanSigner: ChannelKeys + Writeable> ChannelMonitor<ChanSigner> {
 }
 
 impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
-       pub(super) fn new(keys: ChanSigner, funding_key: &SecretKey, revocation_base_key: &SecretKey, delayed_payment_base_key: &SecretKey, htlc_base_key: &SecretKey, payment_base_key: &SecretKey, shutdown_pubkey: &PublicKey, our_to_self_delay: u16, destination_script: Script, logger: Arc<Logger>) -> ChannelMonitor<ChanSigner> {
+       pub(super) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey,
+                       our_to_self_delay: u16, destination_script: &Script, funding_info: (OutPoint, Script),
+                       their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey,
+                       their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64,
+                       commitment_transaction_number_obscure_factor: u64,
+                       logger: Arc<Logger>) -> ChannelMonitor<ChanSigner> {
+
+               assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
+               let funding_key = keys.funding_key().clone();
+               let revocation_base_key = keys.revocation_base_key().clone();
+               let htlc_base_key = keys.htlc_base_key().clone();
+               let delayed_payment_base_key = keys.delayed_payment_base_key().clone();
+               let payment_base_key = keys.payment_base_key().clone();
                ChannelMonitor {
-                       commitment_transaction_number_obscure_factor: 0,
+                       latest_update_id: 0,
+                       commitment_transaction_number_obscure_factor,
 
                        key_storage: Storage::Local {
                                keys,
-                               funding_key: funding_key.clone(),
-                               revocation_base_key: revocation_base_key.clone(),
-                               htlc_base_key: htlc_base_key.clone(),
-                               delayed_payment_base_key: delayed_payment_base_key.clone(),
-                               payment_base_key: payment_base_key.clone(),
+                               funding_key,
+                               revocation_base_key,
+                               htlc_base_key,
+                               delayed_payment_base_key,
+                               payment_base_key,
                                shutdown_pubkey: shutdown_pubkey.clone(),
-                               funding_info: None,
+                               funding_info: Some(funding_info),
                                current_remote_commitment_txid: None,
                                prev_remote_commitment_txid: None,
                        },
-                       their_htlc_base_key: None,
-                       their_delayed_payment_base_key: None,
-                       funding_redeemscript: None,
-                       channel_value_satoshis: None,
+                       their_htlc_base_key: Some(their_htlc_base_key.clone()),
+                       their_delayed_payment_base_key: Some(their_delayed_payment_base_key.clone()),
+                       funding_redeemscript: Some(funding_redeemscript),
+                       channel_value_satoshis: Some(channel_value_satoshis),
                        their_cur_revocation_points: None,
 
                        our_to_self_delay: our_to_self_delay,
-                       their_to_self_delay: None,
+                       their_to_self_delay: Some(their_to_self_delay),
 
-                       old_secrets: [([0; 32], 1 << 48); 49],
+                       commitment_secrets: CounterpartyCommitmentSecrets::new(),
                        remote_claimable_outpoints: HashMap::new(),
                        remote_commitment_txn_on_chain: HashMap::new(),
                        remote_hash_commitment_number: HashMap::new(),
@@ -1056,7 +1269,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        current_remote_commitment_number: 1 << 48,
 
                        payment_preimages: HashMap::new(),
-                       destination_script: destination_script,
+                       pending_htlcs_updated: Vec::new(),
+                       pending_events: Vec::new(),
+
+                       destination_script: destination_script.clone(),
                        to_remote_rescue: None,
 
                        pending_claim_requests: HashMap::new(),
@@ -1111,48 +1327,16 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                current_height + 15
        }
 
-       #[inline]
-       fn place_secret(idx: u64) -> u8 {
-               for i in 0..48 {
-                       if idx & (1 << i) == (1 << i) {
-                               return i
-                       }
-               }
-               48
-       }
-
-       #[inline]
-       fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
-               let mut res: [u8; 32] = secret;
-               for i in 0..bits {
-                       let bitpos = bits - 1 - i;
-                       if idx & (1 << bitpos) == (1 << bitpos) {
-                               res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
-                               res = Sha256::hash(&res).into_inner();
-                       }
-               }
-               res
-       }
-
        /// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
        /// neither needed by local commitment transactions' HTLCs nor by remote ones. If we have already
        /// seen the remote commitment transaction's secret, they are de facto pruned (we can use the
        /// revocation key).
        pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
-               let pos = ChannelMonitor::<ChanSigner>::place_secret(idx);
-               for i in 0..pos {
-                       let (old_secret, old_idx) = self.old_secrets[i as usize];
-                       if ChannelMonitor::<ChanSigner>::derive_secret(secret, pos, old_idx) != old_secret {
-                               return Err(MonitorUpdateError("Previous secret did not match new one"));
-                       }
+               if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
+                       return Err(MonitorUpdateError("Previous secret did not match new one"));
                }
-               if self.get_min_seen_secret() <= idx {
-                       return Ok(());
-               }
-               self.old_secrets[pos as usize] = (secret, idx);
 
                // Prune HTLCs from the previous remote commitment tx so we don't generate failure/fulfill
                // events for now-revoked/fulfilled HTLCs.
-               // TODO: We should probably consider whether we're really getting the next secret here.
                if let Storage::Local { ref mut prev_remote_commitment_txid, .. } = self.key_storage {
                        if let Some(txid) = prev_remote_commitment_txid.take() {
                                for &mut (_, ref mut source) in self.remote_claimable_outpoints.get_mut(&txid).unwrap() {
@@ -1260,8 +1444,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// is important that any clones of this channel monitor (including remote clones) be kept
        /// up-to-date as our local commitment transaction is updated.
        /// Returns an Err if the channel's basic information (their_to_self_delay) has not been set yet.
-       pub(super) fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) {
-               assert!(self.their_to_self_delay.is_some());
+       pub(super) fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
+               if self.their_to_self_delay.is_none() {
+                       return Err(MonitorUpdateError("Got a local commitment tx info update before we'd set basic information about the channel"));
+               }
                self.prev_local_signed_commitment_tx = self.current_local_signed_commitment_tx.take();
                self.current_local_signed_commitment_tx = Some(LocalSignedTx {
                        txid: commitment_tx.txid(),
@@ -1274,6 +1460,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        feerate_per_kw,
                        htlc_outputs,
                });
+               Ok(())
        }
 
        /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
@@ -1282,106 +1469,56 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
        }
 
-       /// Combines this ChannelMonitor with the information contained in the other ChannelMonitor.
-       /// After a successful call this ChannelMonitor is up-to-date and is safe to use to monitor the
-       /// chain for new blocks/transactions.
-       pub fn insert_combine(&mut self, mut other: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
-               match self.key_storage {
-                       Storage::Local { ref funding_info, .. } => {
-                               if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
-                               let our_funding_info = funding_info;
-                               if let Storage::Local { ref funding_info, .. } = other.key_storage {
-                                       if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
-                                       // We should be able to compare the entire funding_txo, but in fuzztarget it's trivially
-                                       // easy to collide the funding_txo hash and have a different scriptPubKey.
-                                       if funding_info.as_ref().unwrap().0 != our_funding_info.as_ref().unwrap().0 {
-                                               return Err(MonitorUpdateError("Funding transaction outputs are not identical!"));
-                                       }
-                               } else {
-                                       return Err(MonitorUpdateError("Try to combine a Local monitor with a Watchtower one !"));
-                               }
-                       },
-                       Storage::Watchtower { .. } => {
-                               if let Storage::Watchtower { .. } = other.key_storage {
-                                       unimplemented!();
-                               } else {
-                                       return Err(MonitorUpdateError("Try to combine a Watchtower monitor with a Local one !"));
-                               }
-                       },
-               }
-               let other_min_secret = other.get_min_seen_secret();
-               let our_min_secret = self.get_min_seen_secret();
-               if our_min_secret > other_min_secret {
-                       self.provide_secret(other_min_secret, other.get_secret(other_min_secret).unwrap())?;
-               }
-               if let Some(ref local_tx) = self.current_local_signed_commitment_tx {
-                       if let Some(ref other_local_tx) = other.current_local_signed_commitment_tx {
-                               let our_commitment_number = 0xffffffffffff - ((((local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
-                               let other_commitment_number = 0xffffffffffff - ((((other_local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (other_local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ other.commitment_transaction_number_obscure_factor);
-                               if our_commitment_number >= other_commitment_number {
-                                       self.key_storage = other.key_storage;
-                               }
-                       }
-               }
-               // TODO: We should use current_remote_commitment_number and the commitment number out of
-               // local transactions to decide how to merge
-               if our_min_secret >= other_min_secret {
-                       self.their_cur_revocation_points = other.their_cur_revocation_points;
-                       for (txid, htlcs) in other.remote_claimable_outpoints.drain() {
-                               self.remote_claimable_outpoints.insert(txid, htlcs);
-                       }
-                       if let Some(local_tx) = other.prev_local_signed_commitment_tx {
-                               self.prev_local_signed_commitment_tx = Some(local_tx);
-                       }
-                       if let Some(local_tx) = other.current_local_signed_commitment_tx {
-                               self.current_local_signed_commitment_tx = Some(local_tx);
-                       }
-                       self.payment_preimages = other.payment_preimages;
-                       self.to_remote_rescue = other.to_remote_rescue;
-               }
-
-               self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
+       /// Used in Channel to cheat with respect to the update_ids, since Channel plays games with
+       /// them; will be removed soon!
+       pub(super) fn update_monitor_ooo(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+               for update in updates.updates.drain(..) {
+                       match update {
+                               ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, local_keys, feerate_per_kw, htlc_outputs } =>
+                                       self.provide_latest_local_commitment_tx_info(commitment_tx, local_keys, feerate_per_kw, htlc_outputs)?,
+                               ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+                                       self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point),
+                               ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+                                       self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+                               ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+                                       self.provide_secret(idx, secret)?,
+                               ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
+                                       self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+                       }
+               }
+               self.latest_update_id = updates.update_id;
                Ok(())
        }
 
-       /// Allows this monitor to scan only for transactions which are applicable. Note that this is
-       /// optional, without it this monitor cannot be used in an SPV client, but you may wish to
-       /// avoid this (or call unset_funding_info) on a monitor you wish to send to a watchtower as it
-       /// provides slightly better privacy.
-       /// It's the responsibility of the caller to register outpoint and script with passing the former
-       /// value as key to add_update_monitor.
-       pub(super) fn set_funding_info(&mut self, new_funding_info: (OutPoint, Script)) {
-               match self.key_storage {
-                       Storage::Local { ref mut funding_info, .. } => {
-                               *funding_info = Some(new_funding_info);
-                       },
-                       Storage::Watchtower { .. } => {
-                               panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
-                       }
-               }
+       /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
+       /// itself.
+       ///
+       /// Panics if the given update is not the next update by update_id.
+       pub fn update_monitor(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+               if self.latest_update_id + 1 != updates.update_id {
+                       panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
+               }
+               for update in updates.updates.drain(..) {
+                       match update {
+                               ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, local_keys, feerate_per_kw, htlc_outputs } =>
+                                       self.provide_latest_local_commitment_tx_info(commitment_tx, local_keys, feerate_per_kw, htlc_outputs)?,
+                               ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+                                       self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point),
+                               ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+                                       self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+                               ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+                                       self.provide_secret(idx, secret)?,
+                               ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
+                                       self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+                       }
+               }
+               self.latest_update_id = updates.update_id;
+               Ok(())
        }
 
-       /// We log these base keys at channel opening to being able to rebuild redeemscript in case of leaked revoked commit tx
-       /// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits
-       pub(super) fn set_basic_channel_info(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey, their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64, commitment_transaction_number_obscure_factor: u64) {
-               self.their_htlc_base_key = Some(their_htlc_base_key.clone());
-               self.their_delayed_payment_base_key = Some(their_delayed_payment_base_key.clone());
-               self.their_to_self_delay = Some(their_to_self_delay);
-               self.funding_redeemscript = Some(funding_redeemscript);
-               self.channel_value_satoshis = Some(channel_value_satoshis);
-               assert!(commitment_transaction_number_obscure_factor < (1 << 48));
-               self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
-       }
-
-       pub(super) fn unset_funding_info(&mut self) {
-               match self.key_storage {
-                       Storage::Local { ref mut funding_info, .. } => {
-                               *funding_info = None;
-                       },
-                       Storage::Watchtower { .. } => {
-                               panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
-                       },
-               }
+       /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
+       /// ChannelMonitor.
+       pub fn get_latest_update_id(&self) -> u64 {
+               self.latest_update_id
        }
 
        /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
@@ -1419,26 +1556,33 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                res
        }
 
+       /// Gets the list of HTLCs whose status has been updated on chain. This should be called by
+       /// ChannelManager via ManyChannelMonitor::get_and_clear_pending_htlcs_updated().
+       pub fn get_and_clear_pending_htlcs_updated(&mut self) -> Vec<HTLCUpdate> {
+               let mut ret = Vec::new();
+               mem::swap(&mut ret, &mut self.pending_htlcs_updated);
+               ret
+       }
+
+       /// Gets the list of pending events which were generated by previous actions, clearing the list
+       /// in the process.
+       ///
+       /// This is called by ManyChannelMonitor::get_and_clear_pending_events() and is equivalent to
+       /// EventsProvider::get_and_clear_pending_events() except that it requires &mut self as we do
+       /// no internal locking in ChannelMonitors.
+       pub fn get_and_clear_pending_events(&mut self) -> Vec<events::Event> {
+               let mut ret = Vec::new();
+               mem::swap(&mut ret, &mut self.pending_events);
+               ret
+       }
+
        /// Can only fail if idx is < get_min_seen_secret
        pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
-               for i in 0..self.old_secrets.len() {
-                       if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
-                               return Some(ChannelMonitor::<ChanSigner>::derive_secret(self.old_secrets[i].0, i as u8, idx))
-                       }
-               }
-               assert!(idx < self.get_min_seen_secret());
-               None
+               self.commitment_secrets.get_secret(idx)
        }
 
        pub(super) fn get_min_seen_secret(&self) -> u64 {
-               //TODO This can be optimized?
-               let mut min = 1 << 48;
-               for &(_, idx) in self.old_secrets.iter() {
-                       if idx < min {
-                               min = idx;
-                       }
-               }
-               min
+               self.commitment_secrets.get_min_seen_secret()
        }
 
        pub(super) fn get_cur_remote_commitment_number(&self) -> u64 {
@@ -1457,7 +1601,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// HTLC-Success/HTLC-Timeout transactions.
        /// Returns updates for HTLCs pending in the channel which were failed automatically by the
        /// broadcast of a revoked remote commitment tx
-       fn check_spend_remote_transaction(&mut self, tx: &Transaction, height: u32, fee_estimator: &FeeEstimator) -> (Vec<Transaction>, (Sha256dHash, Vec<TxOut>), Vec<SpendableOutputDescriptor>) {
+       fn check_spend_remote_transaction<F: Deref>(&mut self, tx: &Transaction, height: u32, fee_estimator: F) -> (Vec<Transaction>, (Sha256dHash, Vec<TxOut>), Vec<SpendableOutputDescriptor>)
+               where F::Target: FeeEstimator
+       {
                // Most secp and related errors trying to create keys means we have no hope of constructing
                // a spend transaction...so we return no transactions to broadcast
                let mut txn_to_broadcast = Vec::new();
@@ -2032,7 +2178,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        }
 
        /// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
-       fn check_spend_remote_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32, fee_estimator: &FeeEstimator) -> (Option<Transaction>, Option<SpendableOutputDescriptor>) {
+       fn check_spend_remote_htlc<F: Deref>(&mut self, tx: &Transaction, commitment_number: u64, height: u32, fee_estimator: F) -> (Option<Transaction>, Option<SpendableOutputDescriptor>)
+               where F::Target: FeeEstimator
+       {
                //TODO: send back new outputs to guarantee pending_claim_request consistency
                if tx.input.len() != 1 || tx.output.len() != 1 {
                        return (None, None)
@@ -2402,7 +2550,10 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// Eventually this should be pub and, roughly, implement ChainListener, however this requires
        /// &mut self, as well as returns new spendable outputs and outpoints to watch for spending of
        /// on-chain.
-       fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+       fn block_connected<B: Deref, F: Deref>(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: B, fee_estimator: F)-> Vec<(Sha256dHash, Vec<TxOut>)>
+               where B::Target: BroadcasterInterface,
+                     F::Target: FeeEstimator
+       {
                for tx in txn_matched {
                        let mut output_val = 0;
                        for out in tx.output.iter() {
@@ -2415,7 +2566,6 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                log_trace!(self, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
                let mut watch_outputs = Vec::new();
                let mut spendable_outputs = Vec::new();
-               let mut htlc_updated = Vec::new();
                let mut bump_candidates = HashSet::new();
                for tx in txn_matched {
                        if tx.input.len() == 1 {
@@ -2435,7 +2585,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                };
                                if funding_txo.is_none() || (prevout.txid == funding_txo.as_ref().unwrap().0.txid && prevout.vout == funding_txo.as_ref().unwrap().0.index as u32) {
                                        if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
-                                               let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(&tx, height, fee_estimator);
+                                               let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(&tx, height, &*fee_estimator);
                                                txn = remote_txn;
                                                spendable_outputs.append(&mut spendable_output);
                                                if !new_outputs.1.is_empty() {
@@ -2457,7 +2607,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                        }
                                } else {
                                        if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
-                                               let (tx, spendable_output) = self.check_spend_remote_htlc(&tx, commitment_number, height, fee_estimator);
+                                               let (tx, spendable_output) = self.check_spend_remote_htlc(&tx, commitment_number, height, &*fee_estimator);
                                                if let Some(tx) = tx {
                                                        txn.push(tx);
                                                }
@@ -2474,10 +2624,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
                        // can also be resolved in a few other ways which can have more than one output. Thus,
                        // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
-                       let mut updated = self.is_resolving_htlc_output(&tx, height);
-                       if updated.len() > 0 {
-                               htlc_updated.append(&mut updated);
-                       }
+                       self.is_resolving_htlc_output(&tx, height);
 
                        // Scan all inputs to check whether any of the spent outpoints is of interest to us
                        let mut claimed_outputs_material = Vec::new();
@@ -2600,7 +2747,11 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                        },
                                        OnchainEvent::HTLCUpdate { htlc_update } => {
                                                log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
-                                               htlc_updated.push((htlc_update.0, None, htlc_update.1));
+                                               self.pending_htlcs_updated.push(HTLCUpdate {
+                                                       payment_hash: htlc_update.1,
+                                                       payment_preimage: None,
+                                                       source: htlc_update.0,
+                                               });
                                        },
                                        OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
                                                self.claimable_outpoints.remove(&outpoint);
@@ -2616,7 +2767,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                for first_claim_txid in bump_candidates.iter() {
                        if let Some((new_timer, new_feerate)) = {
                                if let Some(claim_material) = self.pending_claim_requests.get(first_claim_txid) {
-                                       if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+                                       if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, &*fee_estimator) {
                                                broadcaster.broadcast_transaction(&bump_tx);
                                                Some((new_timer, new_feerate))
                                        } else { None }
@@ -2632,10 +2783,20 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                for &(ref txid, ref output_scripts) in watch_outputs.iter() {
                        self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
                }
-               (watch_outputs, spendable_outputs, htlc_updated)
+
+               if spendable_outputs.len() > 0 {
+                       self.pending_events.push(events::Event::SpendableOutputs {
+                               outputs: spendable_outputs,
+                       });
+               }
+
+               watch_outputs
        }
 
-       fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator) {
+       fn block_disconnected<B: Deref, F: Deref>(&mut self, height: u32, block_hash: &Sha256dHash, broadcaster: B, fee_estimator: F)
+               where B::Target: BroadcasterInterface,
+                     F::Target: FeeEstimator
+       {
                log_trace!(self, "Block {} at height {} disconnected", block_hash, height);
                let mut bump_candidates = HashMap::new();
                if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
@@ -2660,7 +2821,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        }
                }
                for (_, claim_material) in bump_candidates.iter_mut() {
-                       if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, fee_estimator) {
+                       if let Some((new_timer, new_feerate, bump_tx)) = self.bump_claim_tx(height, &claim_material, &*fee_estimator) {
                                claim_material.height_timer = new_timer;
                                claim_material.feerate_previous = new_feerate;
                                broadcaster.broadcast_transaction(&bump_tx);
@@ -2752,9 +2913,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
 
        /// Check whether any broadcast transaction resolves an HTLC output via a success or timeout on a local
        /// or remote commitment tx; if so, record the source, the preimage (if found), and the payment_hash of the resolved HTLC
-       fn is_resolving_htlc_output(&mut self, tx: &Transaction, height: u32) -> Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)> {
-               let mut htlc_updated = Vec::new();
-
+       fn is_resolving_htlc_output(&mut self, tx: &Transaction, height: u32) {
                'outer_loop: for input in &tx.input {
                        let mut payment_data = None;
                        let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
@@ -2854,10 +3013,18 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                let mut payment_preimage = PaymentPreimage([0; 32]);
                                if accepted_preimage_claim {
                                        payment_preimage.0.copy_from_slice(&input.witness[3]);
-                                       htlc_updated.push((source, Some(payment_preimage), payment_hash));
+                                       self.pending_htlcs_updated.push(HTLCUpdate {
+                                               source,
+                                               payment_preimage: Some(payment_preimage),
+                                               payment_hash
+                                       });
                                } else if offered_preimage_claim {
                                        payment_preimage.0.copy_from_slice(&input.witness[1]);
-                                       htlc_updated.push((source, Some(payment_preimage), payment_hash));
+                                       self.pending_htlcs_updated.push(HTLCUpdate {
+                                               source,
+                                               payment_preimage: Some(payment_preimage),
+                                               payment_hash
+                                       });
                                } else {
                                        log_info!(self, "Failing HTLC with payment_hash {} timed out by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
                                        match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
@@ -2880,12 +3047,13 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                }
                        }
                }
-               htlc_updated
        }
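Rather than returning a `Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>`, the function now pushes named `HTLCUpdate` structs onto `self.pending_htlcs_updated` for later draining. A minimal sketch of that queue-and-drain shape, with simplified stand-in types (`HtlcUpdate`, `Monitor`) rather than the crate's own:

	// Simplified stand-ins for HTLCSource / PaymentPreimage / PaymentHash.
	struct HtlcUpdate {
		source: u64,
		payment_preimage: Option<[u8; 32]>,
		payment_hash: [u8; 32],
	}

	#[derive(Default)]
	struct Monitor {
		pending_htlcs_updated: Vec<HtlcUpdate>,
	}

	impl Monitor {
		// Where the old code returned a tuple, push a named struct onto the pending queue.
		fn note_preimage_claim(&mut self, source: u64, preimage: [u8; 32], hash: [u8; 32]) {
			self.pending_htlcs_updated.push(HtlcUpdate {
				source,
				payment_preimage: Some(preimage),
				payment_hash: hash,
			});
		}

		// Callers fetch-and-clear the queue in one place instead of threading tuples around.
		fn get_and_clear_pending_htlcs_updated(&mut self) -> Vec<HtlcUpdate> {
			std::mem::replace(&mut self.pending_htlcs_updated, Vec::new())
		}
	}

	fn main() {
		let mut mon = Monitor::default();
		mon.note_preimage_claim(1, [42; 32], [7; 32]);
		assert_eq!(mon.get_and_clear_pending_htlcs_updated().len(), 1);
		assert!(mon.get_and_clear_pending_htlcs_updated().is_empty());
	}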
 
        /// The Lightning security model (i.e. being able to redeem/timeout HTLCs or penalize a counterparty onchain) relies on the assumption that claim transactions get confirmed before timelock expiration
        /// (CSV or CLTV, depending on the case). During high-fee spikes, a claim tx may get stuck in the mempool, so its feerate needs to be bumped quickly using Replace-By-Fee or Child-Pays-For-Parent.
-       fn bump_claim_tx(&self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: &FeeEstimator) -> Option<(u32, u64, Transaction)> {
+       fn bump_claim_tx<F: Deref>(&self, height: u32, cached_claim_datas: &ClaimTxBumpMaterial, fee_estimator: F) -> Option<(u32, u64, Transaction)>
+               where F::Target: FeeEstimator
+       {
                if cached_claim_datas.per_input_material.len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
                let mut inputs = Vec::new();
                for outp in cached_claim_datas.per_input_material.keys() {
@@ -3041,6 +3209,7 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        return Err(DecodeError::UnknownVersion);
                }
 
+               let latest_update_id: u64 = Readable::read(reader)?;
                let commitment_transaction_number_obscure_factor = <U48 as Readable<R>>::read(reader)?.0;
 
                let key_storage = match <u8 as Readable<R>>::read(reader)? {
@@ -3100,11 +3269,7 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                let our_to_self_delay: u16 = Readable::read(reader)?;
                let their_to_self_delay: Option<u16> = Some(Readable::read(reader)?);
 
-               let mut old_secrets = [([0; 32], 1 << 48); 49];
-               for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
-                       *secret = Readable::read(reader)?;
-                       *idx = Readable::read(reader)?;
-               }
+               let commitment_secrets = Readable::read(reader)?;
 
                macro_rules! read_htlc_in_commitment {
                        () => {
@@ -3221,6 +3386,20 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        }
                }
 
+               let pending_htlcs_updated_len: u64 = Readable::read(reader)?;
+               let mut pending_htlcs_updated = Vec::with_capacity(cmp::min(pending_htlcs_updated_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
+               for _ in 0..pending_htlcs_updated_len {
+                       pending_htlcs_updated.push(Readable::read(reader)?);
+               }
+
+               let pending_events_len: u64 = Readable::read(reader)?;
+               let mut pending_events = Vec::with_capacity(cmp::min(pending_events_len as usize, MAX_ALLOC_SIZE / mem::size_of::<events::Event>()));
+               for _ in 0..pending_events_len {
+                       if let Some(event) = MaybeReadable::read(reader)? {
+                               pending_events.push(event);
+                       }
+               }
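Both length-prefixed reads above cap their `Vec::with_capacity` pre-allocation via `cmp::min`, so an untrusted or corrupt length can only trigger a bounded allocation, while a legitimately long list still grows as elements arrive. A self-contained sketch of the same idea (with `MAX_ALLOC_SIZE` as an assumed local constant):

	use std::cmp;

	const MAX_ALLOC_SIZE: usize = 64 * 1024; // assumed cap, in bytes, on one field's pre-allocation

	fn read_u64_vec(mut read_next: impl FnMut() -> Option<u64>, claimed_len: u64) -> Option<Vec<u64>> {
		// Pre-allocate at most MAX_ALLOC_SIZE bytes worth of elements; a bogus claimed_len
		// cannot force a huge allocation, and a real long vector simply reallocates as it grows.
		let mut out = Vec::with_capacity(cmp::min(claimed_len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<u64>()));
		for _ in 0..claimed_len {
			out.push(read_next()?);
		}
		Some(out)
	}

	fn main() {
		let mut counter = 0u64;
		let v = read_u64_vec(|| { counter += 1; Some(counter) }, 5).unwrap();
		assert_eq!(v, vec![1, 2, 3, 4, 5]);
	}

The `MaybeReadable::read` used for events additionally allows an entry to decode to `None` and be skipped, rather than aborting the whole read.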
+
                let last_block_hash: Sha256dHash = Readable::read(reader)?;
                let destination_script = Readable::read(reader)?;
                let to_remote_rescue = match <u8 as Readable<R>>::read(reader)? {
@@ -3299,6 +3478,7 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                }
 
                Ok((last_block_hash.clone(), ChannelMonitor {
+                       latest_update_id,
                        commitment_transaction_number_obscure_factor,
 
                        key_storage,
@@ -3311,7 +3491,7 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        our_to_self_delay,
                        their_to_self_delay,
 
-                       old_secrets,
+                       commitment_secrets,
                        remote_claimable_outpoints,
                        remote_commitment_txn_on_chain,
                        remote_hash_commitment_number,
@@ -3321,6 +3501,8 @@ impl<R: ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        current_remote_commitment_number,
 
                        payment_preimages,
+                       pending_htlcs_updated,
+                       pending_events,
 
                        destination_script,
                        to_remote_rescue,
@@ -3352,6 +3534,7 @@ mod tests {
        use bitcoin_hashes::sha256d::Hash as Sha256dHash;
        use bitcoin_hashes::hex::FromHex;
        use hex;
+       use chain::transaction::OutPoint;
        use ln::channelmanager::{PaymentPreimage, PaymentHash};
        use ln::channelmonitor::{ChannelMonitor, InputDescriptors};
        use ln::chan_utils;
@@ -3363,373 +3546,6 @@ mod tests {
        use std::sync::Arc;
        use chain::keysinterface::InMemoryChannelKeys;
 
-
-       #[test]
-       fn test_per_commitment_storage() {
-               // Test vectors from BOLT 3:
-               let mut secrets: Vec<[u8; 32]> = Vec::new();
-               let mut monitor: ChannelMonitor<InMemoryChannelKeys>;
-               let secp_ctx = Secp256k1::new();
-               let logger = Arc::new(TestLogger::new());
-
-               macro_rules! test_secrets {
-                       () => {
-                               let mut idx = 281474976710655;
-                               for secret in secrets.iter() {
-                                       assert_eq!(monitor.get_secret(idx).unwrap(), *secret);
-                                       idx -= 1;
-                               }
-                               assert_eq!(monitor.get_min_seen_secret(), idx + 1);
-                               assert!(monitor.get_secret(idx).is_none());
-                       };
-               }
-
-               let keys = InMemoryChannelKeys::new(
-                       &secp_ctx,
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       SecretKey::from_slice(&[41; 32]).unwrap(),
-                       [41; 32],
-                       0,
-               );
-
-               {
-                       // insert_secret correct sequence
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
-                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
-                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
-                       monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-               }
-
-               {
-                       // insert_secret #1 incorrect
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #2 incorrect (#1 derived from incorrect)
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #3 incorrect
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #4 incorrect (1,2,3 derived from incorrect)
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
-                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
-                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #5 incorrect
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #6 incorrect (5 derived from incorrect)
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap());
-                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
-                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #7 incorrect
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
-                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap());
-                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-
-               {
-                       // insert_secret #8 incorrect
-                       monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
-                       secrets.clear();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
-                       monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
-                       monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
-                       monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
-                       monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
-                       monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
-                       monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
-                       monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
-                       test_secrets!();
-
-                       secrets.push([0; 32]);
-                       secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap());
-                       assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
-                                       "Previous secret did not match new one");
-               }
-       }
-
        #[test]
        fn test_prune_preimages() {
                let secp_ctx = Secp256k1::new();
@@ -3811,10 +3627,16 @@ mod tests {
 
                // Prune with one old state and a local commitment tx holding a few overlaps with the
                // old state.
-               let mut monitor = ChannelMonitor::new(keys, &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+               let mut monitor = ChannelMonitor::new(keys,
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0, &Script::new(),
+                       (OutPoint { txid: Sha256dHash::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+                       &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()),
+                       0, Script::new(), 46, 0, logger.clone());
+
                monitor.their_to_self_delay = Some(10);
 
-               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10]));
+               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10])).unwrap();
                monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key);
                monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key);
                monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key);
@@ -3840,7 +3662,7 @@ mod tests {
 
                // Now update local commitment tx info, pruning only element 18 as we still care about the
                // previous commitment tx's preimages too
-               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..5]));
+               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..5])).unwrap();
                secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
                monitor.provide_secret(281474976710653, secret.clone()).unwrap();
                assert_eq!(monitor.payment_preimages.len(), 12);
@@ -3848,7 +3670,7 @@ mod tests {
                test_preimages_exist!(&preimages[18..20], monitor);
 
                // But if we do it again, we'll prune 5-10
-               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..3]));
+               monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..3])).unwrap();
                secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
                monitor.provide_secret(281474976710652, secret.clone()).unwrap();
                assert_eq!(monitor.payment_preimages.len(), 5);
index 9acbc7eddbf7fdd0c45a37c37a00b51bcdf56ea2..cb210d45d59dd112ddbcfa4833f4d1d01651e3d7 100644 (file)
@@ -60,23 +60,28 @@ pub fn connect_blocks<'a, 'b>(notifier: &'a chaininterface::BlockNotifierRef<'b>
        header.bitcoin_hash()
 }
 
-pub struct NodeCfg {
+pub struct TestChanMonCfg {
+       pub tx_broadcaster: test_utils::TestBroadcaster,
+       pub fee_estimator: test_utils::TestFeeEstimator,
+}
+
+pub struct NodeCfg<'a> {
        pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
-       pub tx_broadcaster: Arc<test_utils::TestBroadcaster>,
-       pub fee_estimator: Arc<test_utils::TestFeeEstimator>,
-       pub chan_monitor: test_utils::TestChannelMonitor,
-       pub keys_manager: Arc<test_utils::TestKeysInterface>,
+       pub tx_broadcaster: &'a test_utils::TestBroadcaster,
+       pub fee_estimator: &'a test_utils::TestFeeEstimator,
+       pub chan_monitor: test_utils::TestChannelMonitor<'a>,
+       pub keys_manager: test_utils::TestKeysInterface,
        pub logger: Arc<test_utils::TestLogger>,
        pub node_seed: [u8; 32],
 }
 
-pub struct Node<'a, 'b: 'a> {
-       pub block_notifier: chaininterface::BlockNotifierRef<'b>,
+pub struct Node<'a, 'b: 'a, 'c: 'b> {
+       pub block_notifier: chaininterface::BlockNotifierRef<'a>,
        pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
-       pub tx_broadcaster: Arc<test_utils::TestBroadcaster>,
-       pub chan_monitor: &'b test_utils::TestChannelMonitor,
-       pub keys_manager: Arc<test_utils::TestKeysInterface>,
-       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor>,
+       pub tx_broadcaster: &'c test_utils::TestBroadcaster,
+       pub chan_monitor: &'b test_utils::TestChannelMonitor<'c>,
+       pub keys_manager: &'b test_utils::TestKeysInterface,
+       pub node: &'a ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator>,
        pub router: Router,
        pub node_seed: [u8; 32],
        pub network_payment_count: Rc<RefCell<u8>>,
@@ -84,7 +89,7 @@ pub struct Node<'a, 'b: 'a> {
        pub logger: Arc<test_utils::TestLogger>
 }
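The extra lifetime parameters encode a strict construction order: the longest-lived pieces (`TestChanMonCfg`) are built first, and each later layer only borrows from the one before it, so the borrow checker rather than `Arc` reference counting guarantees the broadcaster and fee estimator outlive every node. A minimal sketch of that layering, with simplified stand-in types:

	struct Broadcaster;
	struct FeeEstimator;

	// Longest-lived layer: owns the plain data (cf. TestChanMonCfg).
	struct ChanMonCfg {
		tx_broadcaster: Broadcaster,
		fee_estimator: FeeEstimator,
	}

	// Middle layer: borrows from the layer above (cf. NodeCfg<'a>).
	struct NodeCfg<'c> {
		tx_broadcaster: &'c Broadcaster,
		fee_estimator: &'c FeeEstimator,
	}

	// Shortest-lived layer: borrows from the middle layer (cf. Node<'a, 'b, 'c>).
	struct Node<'b, 'c: 'b> {
		cfg: &'b NodeCfg<'c>,
	}

	fn main() {
		let chanmon_cfg = ChanMonCfg { tx_broadcaster: Broadcaster, fee_estimator: FeeEstimator };
		let node_cfg = NodeCfg {
			tx_broadcaster: &chanmon_cfg.tx_broadcaster,
			fee_estimator: &chanmon_cfg.fee_estimator,
		};
		let _node = Node { cfg: &node_cfg };
		// Values drop in reverse order of construction, so every borrow stays valid.
	}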
 
-impl<'a, 'b> Drop for Node<'a, 'b> {
+impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
        fn drop(&mut self) {
                if !::std::thread::panicking() {
                        // Check that we processed all pending events
@@ -95,7 +100,7 @@ impl<'a, 'b> Drop for Node<'a, 'b> {
                        // Check that if we serialize and then deserialize all our channel monitors we get the
                        // same set of outputs to watch for on chain as we have now. Note that if we write
                        // tests that fully close channels and remove the monitors at some point this may break.
-                       let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+                       let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
                        let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
                        let mut deserialized_monitors = Vec::new();
                        for (_, old_monitor) in old_monitors.iter() {
@@ -116,10 +121,10 @@ impl<'a, 'b> Drop for Node<'a, 'b> {
 
                                let mut w = test_utils::TestVecWriter(Vec::new());
                                self.node.write(&mut w).unwrap();
-                               <(Sha256d, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
+                               <(Sha256d, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>)>::read(&mut ::std::io::Cursor::new(w.0), ChannelManagerReadArgs {
                                        default_config: UserConfig::default(),
-                                       keys_manager: self.keys_manager.clone(),
-                                       fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+                                       keys_manager: self.keys_manager,
+                                       fee_estimator: &test_utils::TestFeeEstimator { sat_per_kw: 253 },
                                        monitor: self.chan_monitor,
                                        tx_broadcaster: self.tx_broadcaster.clone(),
                                        logger: Arc::new(test_utils::TestLogger::new()),
@@ -128,9 +133,9 @@ impl<'a, 'b> Drop for Node<'a, 'b> {
                        }
 
                        let chain_watch = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&self.logger) as Arc<Logger>));
-                       let channel_monitor = test_utils::TestChannelMonitor::new(chain_watch.clone(), self.tx_broadcaster.clone(), self.logger.clone(), feeest);
+                       let channel_monitor = test_utils::TestChannelMonitor::new(chain_watch.clone(), self.tx_broadcaster.clone(), self.logger.clone(), &feeest);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
-                               if let Err(_) = channel_monitor.add_update_monitor(deserialized_monitor.get_funding_txo().unwrap(), deserialized_monitor) {
+                               if let Err(_) = channel_monitor.add_monitor(deserialized_monitor.get_funding_txo().unwrap(), deserialized_monitor) {
                                        panic!();
                                }
                        }
@@ -141,11 +146,11 @@ impl<'a, 'b> Drop for Node<'a, 'b> {
        }
 }
 
-pub fn create_chan_between_nodes<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
        create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
 }
 
-pub fn create_chan_between_nodes_with_value<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
        let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
        (announcement, as_update, bs_update, channel_id, tx)
@@ -220,7 +225,7 @@ macro_rules! get_feerate {
        }
 }
 
-pub fn create_funding_transaction<'a, 'b>(node: &Node<'a, 'b>, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
+pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
        let chan_id = *node.network_chan_count.borrow();
 
        let events = node.node.get_and_clear_pending_events();
@@ -240,7 +245,7 @@ pub fn create_funding_transaction<'a, 'b>(node: &Node<'a, 'b>, expected_chan_val
        }
 }
 
-pub fn create_chan_between_nodes_with_value_init<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction {
+pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> Transaction {
        node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
        node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
        node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
@@ -284,12 +289,12 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b>(node_a: &Node<'a, 'b>,
        tx
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c>(node_recv: &'a Node<'a, 'b>, node_conf: &'a Node<'a, 'b>, tx: &Transaction) {
+pub fn create_chan_between_nodes_with_value_confirm_first<'a, 'b, 'c, 'd>(node_recv: &'a Node<'b, 'c, 'c>, node_conf: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
        confirm_transaction(&node_conf.block_notifier, &node_conf.chain_monitor, &tx, tx.version);
        node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id()));
 }
 
-pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b>(node_recv: &Node<'a, 'b>, node_conf: &Node<'a, 'b>) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b, 'c>(node_recv: &Node<'a, 'b, 'c>, node_conf: &Node<'a, 'b, 'c>) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
        let channel_id;
        let events_6 = node_conf.node.get_and_clear_pending_msg_events();
        assert_eq!(events_6.len(), 2);
@@ -309,19 +314,19 @@ pub fn create_chan_between_nodes_with_value_confirm_second<'a, 'b>(node_recv: &N
        }), channel_id)
 }
 
-pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+pub fn create_chan_between_nodes_with_value_confirm<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
        create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
        confirm_transaction(&node_a.block_notifier, &node_a.chain_monitor, &tx, tx.version);
        create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
 }
 
-pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c>(node_a: &'a Node<'b, 'c>, node_b: &'a Node<'b, 'c>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+pub fn create_chan_between_nodes_with_value_a<'a, 'b, 'c, 'd>(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
        let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
        let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
        (msgs, chan_id, tx)
 }
 
-pub fn create_chan_between_nodes_with_value_b<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
+pub fn create_chan_between_nodes_with_value_b<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
        node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0);
        let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
        node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1);
@@ -353,11 +358,11 @@ pub fn create_chan_between_nodes_with_value_b<'a, 'b>(node_a: &Node<'a, 'b>, nod
        ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
 }
 
-pub fn create_announced_chan_between_nodes<'a, 'b, 'c>(nodes: &'a Vec<Node<'b, 'c>>, a: usize, b: usize, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
        create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
 }
 
-pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c>(nodes: &'a Vec<Node<'b, 'c>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+pub fn create_announced_chan_between_nodes_with_value<'a, 'b, 'c, 'd>(nodes: &'a Vec<Node<'b, 'c, 'd>>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: InitFeatures, b_flags: InitFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
        let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
        for node in nodes {
                assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
@@ -427,7 +432,7 @@ macro_rules! check_closed_broadcast {
        }}
 }
 
-pub fn close_channel<'a, 'b>(outbound_node: &Node<'a, 'b>, inbound_node: &Node<'a, 'b>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
+pub fn close_channel<'a, 'b, 'c>(outbound_node: &Node<'a, 'b, 'c>, inbound_node: &Node<'a, 'b, 'c>, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
        let (tx_a, tx_b);
@@ -514,7 +519,7 @@ impl SendEvent {
                }
        }
 
-       pub fn from_node<'a, 'b>(node: &Node<'a, 'b>) -> SendEvent {
+       pub fn from_node<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>) -> SendEvent {
                let mut events = node.node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                SendEvent::from_event(events.pop().unwrap())
@@ -662,7 +667,21 @@ macro_rules! expect_payment_sent {
        }
 }
 
-pub fn send_along_route_with_hash<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64, our_payment_hash: PaymentHash) {
+macro_rules! expect_payment_failed {
+       ($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr) => {
+               let events = $node.node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentFailed { ref payment_hash, rejected_by_dest, .. } => {
+                               assert_eq!(*payment_hash, $expected_payment_hash);
+                               assert_eq!(rejected_by_dest, $rejected_by_dest);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+       }
+}
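A typical call site for this macro could look like the following (illustrative; the node index and `our_payment_hash` are placeholders rather than values from a specific test):

	// Assert the origin node saw exactly one PaymentFailed event for this hash, and that
	// the failure was attributed to the final recipient.
	expect_payment_failed!(nodes[0], our_payment_hash, true);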
+
+pub fn send_along_route_with_hash<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash) {
        let mut payment_event = {
                origin_node.node.send_payment(route, our_payment_hash).unwrap();
                check_added_monitors!(origin_node, 1);
@@ -704,13 +723,13 @@ pub fn send_along_route_with_hash<'a, 'b>(origin_node: &Node<'a, 'b>, route: Rou
        }
 }
 
-pub fn send_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
+pub fn send_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, route: Route, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
        let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
        send_along_route_with_hash(origin_node, route, expected_route, recv_value, our_payment_hash);
        (our_payment_preimage, our_payment_hash)
 }
 
-pub fn claim_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], skip_last: bool, our_payment_preimage: PaymentPreimage, expected_amount: u64) {
+pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], skip_last: bool, our_payment_preimage: PaymentPreimage, expected_amount: u64) {
        assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage, expected_amount));
        check_added_monitors!(expected_route.last().unwrap(), 1);
 
@@ -788,13 +807,13 @@ pub fn claim_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_ro
        }
 }
 
-pub fn claim_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], our_payment_preimage: PaymentPreimage, expected_amount: u64) {
+pub fn claim_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], our_payment_preimage: PaymentPreimage, expected_amount: u64) {
        claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage, expected_amount);
 }
 
 pub const TEST_FINAL_CLTV: u32 = 32;
 
-pub fn route_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
+pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
        let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
        assert_eq!(route.hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
@@ -804,7 +823,7 @@ pub fn route_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node
        send_along_route(origin_node, route, expected_route, recv_value)
 }
 
-pub fn route_over_limit<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64)  {
+pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64)  {
        let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
        assert_eq!(route.hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
@@ -820,12 +839,12 @@ pub fn route_over_limit<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&N
        };
 }
 
-pub fn send_payment<'a, 'b>(origin: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], recv_value: u64, expected_value: u64)  {
+pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64, expected_value: u64)  {
        let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
        claim_payment(&origin, expected_route, our_payment_preimage, expected_value);
 }
 
-pub fn fail_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], skip_last: bool, our_payment_hash: PaymentHash)  {
+pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], skip_last: bool, our_payment_hash: PaymentHash)  {
        assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
        expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
        check_added_monitors!(expected_route.last().unwrap(), 1);
@@ -894,43 +913,52 @@ pub fn fail_payment_along_route<'a, 'b>(origin_node: &Node<'a, 'b>, expected_rou
        }
 }
 
-pub fn fail_payment<'a, 'b>(origin_node: &Node<'a, 'b>, expected_route: &[&Node<'a, 'b>], our_payment_hash: PaymentHash)  {
+pub fn fail_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], our_payment_hash: PaymentHash)  {
        fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
 }
 
-pub fn create_node_cfgs(node_count: usize) -> Vec<NodeCfg> {
+pub fn create_chanmon_cfgs(node_count: usize) -> Vec<TestChanMonCfg> {
+       let mut chan_mon_cfgs = Vec::new();
+       for _ in 0..node_count {
+               let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), broadcasted_txn: Mutex::new(HashSet::new())};
+               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+               chan_mon_cfgs.push(TestChanMonCfg{ tx_broadcaster, fee_estimator });
+       }
+
+       chan_mon_cfgs
+}
+
+pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec<TestChanMonCfg>) -> Vec<NodeCfg<'a>> {
        let mut nodes = Vec::new();
        let mut rng = thread_rng();
 
        for i in 0..node_count {
                let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
-               let fee_estimator = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
                let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, logger.clone() as Arc<Logger>));
-               let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), broadcasted_txn: Mutex::new(HashSet::new())});
                let mut seed = [0; 32];
                rng.fill_bytes(&mut seed);
-               let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet, logger.clone() as Arc<Logger>));
-               let chan_monitor = test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone());
-               nodes.push(NodeCfg { chain_monitor, logger, tx_broadcaster, fee_estimator, chan_monitor, keys_manager, node_seed: seed });
+               let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet, logger.clone() as Arc<Logger>);
+               let chan_monitor = test_utils::TestChannelMonitor::new(chain_monitor.clone(), &chanmon_cfgs[i].tx_broadcaster, logger.clone(), &chanmon_cfgs[i].fee_estimator);
+               nodes.push(NodeCfg { chain_monitor, logger, tx_broadcaster: &chanmon_cfgs[i].tx_broadcaster, fee_estimator: &chanmon_cfgs[i].fee_estimator, chan_monitor, keys_manager, node_seed: seed });
        }
 
        nodes
 }
 
-pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor>> {
+pub fn create_node_chanmgrs<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg<'b>>, node_config: &[Option<UserConfig>]) -> Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor<'b>, &'b test_utils::TestBroadcaster, &'a test_utils::TestKeysInterface, &'b test_utils::TestFeeEstimator>> {
        let mut chanmgrs = Vec::new();
        for i in 0..node_count {
                let mut default_config = UserConfig::default();
                default_config.channel_options.announced_channel = true;
                default_config.peer_channel_config_limits.force_announced_channel_preference = false;
-               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator.clone(), &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster.clone(), cfgs[i].logger.clone(), cfgs[i].keys_manager.clone(), if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0).unwrap();
+               let node = ChannelManager::new(Network::Testnet, cfgs[i].fee_estimator, &cfgs[i].chan_monitor, cfgs[i].tx_broadcaster, cfgs[i].logger.clone(), &cfgs[i].keys_manager, if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0).unwrap();
                chanmgrs.push(node);
        }
 
        chanmgrs
 }
 
-pub fn create_network<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg>, chan_mgrs: &'b Vec<ChannelManager<EnforcingChannelKeys, &'a TestChannelMonitor>>) -> Vec<Node<'a, 'b>> {
+pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeCfg<'c>>, chan_mgrs: &'a Vec<ChannelManager<EnforcingChannelKeys, &'b TestChannelMonitor<'c>, &'c test_utils::TestBroadcaster, &'b test_utils::TestKeysInterface, &'c test_utils::TestFeeEstimator>>) -> Vec<Node<'a, 'b, 'c>> {
        let secp_ctx = Secp256k1::new();
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
@@ -942,8 +970,8 @@ pub fn create_network<'a, 'b>(node_count: usize, cfgs: &'a Vec<NodeCfg>, chan_mg
                block_notifier.register_listener(&chan_mgrs[i] as &chaininterface::ChainListener);
                let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &cfgs[i].keys_manager.get_node_secret()), cfgs[i].chain_monitor.clone(), cfgs[i].logger.clone() as Arc<Logger>);
                nodes.push(Node{ chain_monitor: cfgs[i].chain_monitor.clone(), block_notifier,
-                                                                                tx_broadcaster: cfgs[i].tx_broadcaster.clone(), chan_monitor: &cfgs[i].chan_monitor,
-                                                                                keys_manager: cfgs[i].keys_manager.clone(), node: &chan_mgrs[i], router,
+                                                                                tx_broadcaster: cfgs[i].tx_broadcaster, chan_monitor: &cfgs[i].chan_monitor,
+                                                                                keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], router,
                                                                                 node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
                                                                                 network_payment_count: payment_count.clone(), logger: cfgs[i].logger.clone(),
                })
@@ -968,7 +996,7 @@ pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
 ///
 /// All broadcast transactions must be accounted for in one of the above three types or we'll
 /// also fail.
-pub fn test_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
+pub fn test_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
 
@@ -1013,7 +1041,7 @@ pub fn test_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, chan: &(msgs::ChannelUpda
 
 /// Tests that the given node has broadcast a claim transaction against the provided revoked
 /// HTLC transaction.
-pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, revoked_tx: Transaction, commitment_revoked_tx: Transaction)  {
+pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, revoked_tx: Transaction, commitment_revoked_tx: Transaction)  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
        // We should issue a 2nd transaction if one htlc is dropped from the initial claiming tx,
        // but sometimes not, as the feerate is too low
@@ -1031,7 +1059,7 @@ pub fn test_revoked_htlc_claim_txn_broadcast<'a, 'b>(node: &Node<'a, 'b>, revoke
        assert!(node_txn.is_empty());
 }
 
-pub fn check_preimage_claim<'a, 'b>(node: &Node<'a, 'b>, prev_txn: &Vec<Transaction>) -> Vec<Transaction>  {
+pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<Transaction>) -> Vec<Transaction>  {
        let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert!(node_txn.len() >= 1);
@@ -1055,7 +1083,7 @@ pub fn check_preimage_claim<'a, 'b>(node: &Node<'a, 'b>, prev_txn: &Vec<Transact
        res
 }
 
-pub fn get_announce_close_broadcast_events<'a, 'b>(nodes: &Vec<Node<'a, 'b>>, a: usize, b: usize)  {
+pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize)  {
        let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
        assert_eq!(events_1.len(), 1);
        let as_update = match events_1[0] {
@@ -1162,7 +1190,7 @@ macro_rules! handle_chan_reestablish_msgs {
 
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b>(node_a: &Node<'a, 'b>, node_b: &Node<'a, 'b>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
+pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool))  {
        node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
        let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
        node_b.node.peer_connected(&node_a.node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
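To make the tuple parameters documented above concrete, here is the most common call shape (an illustrative sketch, not copied from this patch; `nodes` comes from the usual harness):

        // Reconnect two peers with nothing outstanding: no funding_locked to resend, no pending
        // HTLC adds, claims or fails (in-flight or holding-cell), and no pending revoke_and_ack.
        reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

A test that disconnected its peers with an un-delivered update_add_htlc would instead set the corresponding element of pending_htlc_adds to 1 so the helper knows to expect it during retransmission.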
index a38ef6f34acc7ef034bd54fb7a7b4bc47cc0f4e4..fde5cf8268bc8157fa78d713f6d89aba7b54af3e 100644 (file)
@@ -53,7 +53,8 @@ use ln::functional_test_utils::*;
 #[test]
 fn test_insane_channel_opens() {
        // Stand up a network of 2 nodes
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
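The four-line preamble above is the new per-test boilerplate introduced by this refactor; every test in this file gains the extra create_chanmon_cfgs line. Annotated, as a sketch of the ownership story for a two-node test with default configs:

        let chanmon_cfgs = create_chanmon_cfgs(2);             // owns each node's TestBroadcaster + TestFeeEstimator
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);    // borrows those; owns the keys manager + TestChannelMonitor
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); // ChannelManagers borrowing the cfgs
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);              // Node views borrowing everything above

Declaration order matters: each later binding borrows from the earlier ones, matching the 'c: 'b: 'a lifetime bounds on create_network, so the longest-lived pieces are created first and dropped last.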
 
@@ -110,7 +111,8 @@ fn test_insane_channel_opens() {
 
 #[test]
 fn test_async_inbound_update_fee() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -222,7 +224,8 @@ fn test_async_inbound_update_fee() {
 fn test_update_fee_unordered_raa() {
        // Just the intro to the previous test followed by an out-of-order RAA (which caused a
        // crash in an earlier version of the update_fee patch)
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -274,7 +277,8 @@ fn test_update_fee_unordered_raa() {
 
 #[test]
 fn test_multi_flight_update_fee() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -390,7 +394,8 @@ fn test_1_conf_open() {
        bob_config.own_channel_config.minimum_depth = 1;
        bob_config.channel_options.announced_channel = true;
        bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -419,7 +424,8 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
        // serialization round-trips and simply do steps towards opening a channel and then drop the
        // Node objects.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -513,7 +519,8 @@ fn test_sanity_on_in_flight_opens() {
 
 #[test]
 fn test_update_fee_vanilla() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -553,7 +560,8 @@ fn test_update_fee_vanilla() {
 
 #[test]
 fn test_update_fee_that_funder_cannot_afford() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_value = 1888;
@@ -604,7 +612,8 @@ fn test_update_fee_that_funder_cannot_afford() {
 
 #[test]
 fn test_update_fee_with_fundee_update_add_htlc() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -700,7 +709,8 @@ fn test_update_fee_with_fundee_update_add_htlc() {
 
 #[test]
 fn test_update_fee() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -803,7 +813,8 @@ fn test_update_fee() {
 #[test]
 fn pre_funding_lock_shutdown_test() {
        // Test sending a shutdown prior to funding_locked after funding generation
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, InitFeatures::supported(), InitFeatures::supported());
@@ -831,7 +842,8 @@ fn pre_funding_lock_shutdown_test() {
 #[test]
 fn updates_shutdown_wait() {
        // Test sending a shutdown with outstanding updates pending
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -905,7 +917,8 @@ fn updates_shutdown_wait() {
 #[test]
 fn htlc_fail_async_shutdown() {
        // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -988,7 +1001,8 @@ fn htlc_fail_async_shutdown() {
 fn do_test_shutdown_rebroadcast(recv_count: u8) {
        // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
        // messages delivered prior to disconnect
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -1149,7 +1163,8 @@ fn test_shutdown_rebroadcast() {
 fn fake_network_test() {
        // Simple test which builds a network of ChannelManagers, connects them to each other, and
        // tests that payments get routed and transactions broadcast in semi-reasonable ways.
-       let node_cfgs = create_node_cfgs(4);
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
        let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
@@ -1280,7 +1295,8 @@ fn holding_cell_htlc_counting() {
        // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
        // to ensure we don't end up with HTLCs sitting around in our holding cell for several
        // commitment dance rounds.
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -1409,7 +1425,8 @@ fn holding_cell_htlc_counting() {
 fn duplicate_htlc_test() {
        // Test that we accept duplicate payment_hash HTLCs across the network and that
        // claiming/failing them are all separate and don't affect each other
-       let node_cfgs = create_node_cfgs(6);
+       let chanmon_cfgs = create_chanmon_cfgs(6);
+       let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
        let mut nodes = create_network(6, &node_cfgs, &node_chanmgrs);
 
@@ -1438,7 +1455,8 @@ fn test_duplicate_htlc_different_direction_onchain() {
        // Test that ChannelMonitor doesn't generate 2 preimage txn
        // when we have 2 HTLCs with same preimage that go across a node
        // in opposite directions.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -1505,7 +1523,8 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
 fn do_channel_reserve_test(test_recv: bool) {
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, InitFeatures::supported(), InitFeatures::supported());
@@ -1791,7 +1810,8 @@ fn channel_reserve_in_flight_removes() {
        //    removed it fully. B now has the push_msat plus the first two HTLCs in value.
        //  * Now B happily sends another HTLC, potentially violating its reserve value from A's point
        //    of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -1920,7 +1940,8 @@ fn channel_reserve_in_flight_removes() {
 fn channel_monitor_network_test() {
        // Simple test which builds a network of ChannelManagers, connects them to each other, and
        // tests that ChannelMonitor is able to recover from various states.
-       let node_cfgs = create_node_cfgs(5);
+       let chanmon_cfgs = create_chanmon_cfgs(5);
+       let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
        let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
 
@@ -2070,7 +2091,8 @@ fn test_justice_tx() {
        bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
        bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
        let user_cfgs = [Some(alice_config), Some(bob_config)];
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        // Create some new channels:
@@ -2161,7 +2183,8 @@ fn test_justice_tx() {
 fn revoked_output_claim() {
        // Simple test to ensure a node will claim a revoked output when a stale remote commitment
        // transaction is broadcast by its counterparty
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -2190,7 +2213,8 @@ fn revoked_output_claim() {
 #[test]
 fn claim_htlc_outputs_shared_tx() {
        // Node revoked old state, htlcs haven't timed out yet, claim them in a shared justice tx
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -2265,7 +2289,8 @@ fn claim_htlc_outputs_shared_tx() {
 #[test]
 fn claim_htlc_outputs_single_tx() {
        // Node revoked old state, htlcs have timed out, claim each of them in a separate justice tx
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -2378,7 +2403,8 @@ fn test_htlc_on_chain_success() {
        // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
        // PaymentSent event).
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -2547,7 +2573,8 @@ fn test_htlc_on_chain_timeout() {
        //            \                                  \
        //         B's HTLC timeout tx               B's timeout tx
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -2657,7 +2684,8 @@ fn test_simple_commitment_revoked_fail_backward() {
        // Test that in case of a revoked commitment tx, we detect the resolution of output by justice tx
        // and fail backward accordingly.
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -2727,7 +2755,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
        // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
        //   and once they revoke the previous commitment transaction (allowing us to send a new
        //   commitment_signed) we will be free to fail/fulfill the HTLC backwards.
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -2940,7 +2969,8 @@ fn test_commitment_revoked_fail_backward_exhaustive_b() {
 fn test_htlc_ignore_latest_remote_commitment() {
        // Test that HTLC transactions spending the latest remote commitment transaction are simply
        // ignored if we cannot claim them. This originally tickled an invalid unwrap().
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -2964,7 +2994,8 @@ fn test_htlc_ignore_latest_remote_commitment() {
 #[test]
 fn test_force_close_fail_back() {
        // Check which HTLCs are failed-backwards on channel force-closure
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -3040,7 +3071,8 @@ fn test_force_close_fail_back() {
 #[test]
 fn test_unconf_chan() {
        // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -3071,7 +3103,8 @@ fn test_unconf_chan() {
 #[test]
 fn test_simple_peer_disconnect() {
        // Test that we can reconnect when there are no lost messages
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -3126,7 +3159,8 @@ fn test_simple_peer_disconnect() {
 
 fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
        // Test that we can reconnect when in-flight HTLC updates get dropped
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        if messages_delivered == 0 {
@@ -3334,7 +3368,8 @@ fn test_drop_messages_peer_disconnect_b() {
 #[test]
 fn test_funding_peer_disconnect() {
        // Test that we can lock in our funding tx while disconnected
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::supported(), InitFeatures::supported());
@@ -3418,7 +3453,8 @@ fn test_funding_peer_disconnect() {
 fn test_drop_messages_peer_disconnect_dual_htlc() {
        // Test that we can handle reconnecting when both sides of a channel have pending
        // commitment_updates when we disconnect.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -3559,7 +3595,8 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
 fn test_invalid_channel_announcement() {
        // Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs
        let secp_ctx = Secp256k1::new();
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -3633,10 +3670,13 @@ fn test_invalid_channel_announcement() {
 
 #[test]
 fn test_no_txn_manager_serialize_deserialize() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let fee_estimator: test_utils::TestFeeEstimator;
        let new_chan_monitor: test_utils::TestChannelMonitor;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>;
+       let keys_manager: test_utils::TestKeysInterface;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::supported(), InitFeatures::supported());
@@ -3647,7 +3687,8 @@ fn test_no_txn_manager_serialize_deserialize() {
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }));
+       fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), &fee_estimator);
        nodes[0].chan_monitor = &new_chan_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(Sha256dHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
@@ -3655,14 +3696,14 @@ fn test_no_txn_manager_serialize_deserialize() {
 
        let mut nodes_0_read = &nodes_0_serialized[..];
        let config = UserConfig::default();
-       let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
+       keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()));
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &mut chan_0_monitor);
-               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: config,
-                       keys_manager,
-                       fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+                       keys_manager: &keys_manager,
+                       fee_estimator: &fee_estimator,
                        monitor: nodes[0].chan_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: Arc::new(test_utils::TestLogger::new()),
@@ -3672,7 +3713,7 @@ fn test_no_txn_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+       assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
        nodes[0].block_notifier.register_listener(nodes[0].node);
        assert_eq!(nodes[0].node.list_channels().len(), 1);
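A note on the forward declarations this refactor adds to the serialization tests (fee_estimator, new_chan_monitor and keys_manager declared before nodes): the deserialized ChannelManager now borrows these objects instead of holding Arcs, and Rust drops locals in reverse declaration order, so anything the manager borrows must be declared earlier to be dropped later. A standalone illustration of the rule, using hypothetical types that are not part of this patch:

        struct Borrower<'a> { fee: &'a u32 }
        impl<'a> Drop for Borrower<'a> {
                // The destructor reads through the borrow, so the borrow checker insists
                // the referent strictly outlive this value.
                fn drop(&mut self) { let _ = *self.fee; }
        }

        fn main() {
                let fee: u32;                // declared first  => dropped last
                let holder: Borrower<'_>;    // declared second => dropped before `fee`, as required
                fee = 253;
                holder = Borrower { fee: &fee };
                assert_eq!(*holder.fee, 253);
                // Swapping the two `let` declarations makes this fail to compile with
                // "`fee` does not live long enough".
        }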
@@ -3701,10 +3742,13 @@ fn test_no_txn_manager_serialize_deserialize() {
 
 #[test]
 fn test_simple_manager_serialize_deserialize() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let fee_estimator: test_utils::TestFeeEstimator;
        let new_chan_monitor: test_utils::TestChannelMonitor;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>;
+       let keys_manager: test_utils::TestKeysInterface;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
@@ -3717,21 +3761,22 @@ fn test_simple_manager_serialize_deserialize() {
        let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
        nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
 
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }));
+       fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), &fee_estimator);
        nodes[0].chan_monitor = &new_chan_monitor;
        let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
        let (_, mut chan_0_monitor) = <(Sha256dHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
        assert!(chan_0_monitor_read.is_empty());
 
        let mut nodes_0_read = &nodes_0_serialized[..];
-       let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
+       keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()));
        let (_, nodes_0_deserialized_tmp) = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &mut chan_0_monitor);
-               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                        default_config: UserConfig::default(),
-                       keys_manager,
-                       fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+                       keys_manager: &keys_manager,
+                       fee_estimator: &fee_estimator,
                        monitor: nodes[0].chan_monitor,
                        tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                        logger: Arc::new(test_utils::TestLogger::new()),
@@ -3741,7 +3786,7 @@ fn test_simple_manager_serialize_deserialize() {
        nodes_0_deserialized = nodes_0_deserialized_tmp;
        assert!(nodes_0_read.is_empty());
 
-       assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+       assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
        check_added_monitors!(nodes[0], 1);
 
@@ -3754,10 +3799,13 @@ fn test_simple_manager_serialize_deserialize() {
 #[test]
 fn test_manager_serialize_deserialize_inconsistent_monitor() {
        // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
-       let node_cfgs = create_node_cfgs(4);
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+       let fee_estimator: test_utils::TestFeeEstimator;
        let new_chan_monitor: test_utils::TestChannelMonitor;
-       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>;
+       let keys_manager: test_utils::TestKeysInterface;
+       let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>;
        let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
        create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::supported(), InitFeatures::supported());
@@ -3782,7 +3830,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
                node_0_monitors_serialized.push(writer.0);
        }
 
-       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }));
+       fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+       new_chan_monitor = test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), &fee_estimator);
        nodes[0].chan_monitor = &new_chan_monitor;
        let mut node_0_monitors = Vec::new();
        for serialized in node_0_monitors_serialized.iter() {
@@ -3793,11 +3842,11 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        let mut nodes_0_read = &nodes_0_serialized[..];
-       let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
-       let (_, nodes_0_deserialized_tmp) = <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+       keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()));
+       let (_, nodes_0_deserialized_tmp) = <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
                default_config: UserConfig::default(),
-               keys_manager,
-               fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+               keys_manager: &keys_manager,
+               fee_estimator: &fee_estimator,
                monitor: nodes[0].chan_monitor,
                tx_broadcaster: nodes[0].tx_broadcaster.clone(),
                logger: Arc::new(test_utils::TestLogger::new()),
@@ -3814,7 +3863,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
        }
 
        for monitor in node_0_monitors.drain(..) {
-               assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
+               assert!(nodes[0].chan_monitor.add_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
                check_added_monitors!(nodes[0], 1);
        }
        nodes[0].node = &nodes_0_deserialized;
@@ -3956,7 +4005,8 @@ macro_rules! check_spendable_outputs {
 #[test]
 fn test_claim_sizeable_push_msat() {
        // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -3979,7 +4029,8 @@ fn test_claim_sizeable_push_msat() {
 fn test_claim_on_remote_sizeable_push_msat() {
        // Same test as previous, just on the remote commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
        // to_remote output is encumbered by a P2WPKH
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4006,7 +4057,8 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        // Same test as previous, just on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're funder/fundee and
        // to_remote output is encumbered by a P2WPKH
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4032,7 +4084,8 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 
 #[test]
 fn test_static_spendable_outputs_preimage_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4077,7 +4130,8 @@ eprintln!("{:?}", node_txn[1]);
 
 #[test]
 fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4108,7 +4162,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
 
 #[test]
 fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4154,7 +4209,8 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
 
 #[test]
 fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4209,7 +4265,8 @@ fn test_onchain_to_onchain_claim() {
        // Finally, check that B will claim the HTLC output if A's latest commitment transaction
        // gets broadcast.
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -4301,7 +4358,8 @@ fn test_onchain_to_onchain_claim() {
 fn test_duplicate_payment_hash_one_failure_one_success() {
        // Topology : A --> B --> C
        // We route 2 payments with the same hash between B and C; one will time out, the other will be claimed successfully
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -4420,7 +4478,8 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
 #[test]
 fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4471,7 +4530,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        //    - C - D -
        // B /         \ F
        // And test where C fails back to A/B when D announces its latest commitment transaction
-       let node_cfgs = create_node_cfgs(6);
+       let chanmon_cfgs = create_chanmon_cfgs(6);
+       let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
        let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
 
@@ -4712,7 +4772,8 @@ fn test_fail_backwards_previous_remote_announce() {
 
 #[test]
 fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4749,7 +4810,8 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 
 #[test]
 fn test_static_output_closing_tx() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -4771,7 +4833,8 @@ fn test_static_output_closing_tx() {
 }
 
 fn do_htlc_claim_local_commitment_only(use_dust: bool) {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -4810,7 +4873,8 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -4837,7 +4901,8 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5136,7 +5201,8 @@ fn test_onion_failure() {
        const NODE: u16 = 0x2000;
        const UPDATE: u16 = 0x1000;
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        for node in nodes.iter() {
@@ -5364,7 +5430,8 @@ fn test_onion_failure() {
 #[test]
 #[should_panic]
 fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        //Force duplicate channel ids
@@ -5385,7 +5452,8 @@ fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on i
 
 #[test]
 fn bolt2_open_channel_sending_node_checks_part2() {
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -5435,7 +5503,8 @@ fn bolt2_open_channel_sending_node_checks_part2() {
 fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
        //BOLT2 Requirement: MUST offer amount_msat greater than 0.
        //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5459,7 +5528,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
        //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
        //It is enforced when constructing a route.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, InitFeatures::supported(), InitFeatures::supported());
@@ -5480,7 +5550,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
        //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
        //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::supported(), InitFeatures::supported());
@@ -5525,7 +5596,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
 #[test]
 fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_value = 100000;
@@ -5553,7 +5625,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 #[test]
 fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5578,7 +5651,8 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 #[test]
 fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5603,7 +5677,8 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
 fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
        //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5646,7 +5721,8 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 #[test]
 fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5666,7 +5742,8 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 #[test]
 fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5688,7 +5765,8 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
 	// We test this by first testing that repeated HTLCs pass commitment signature checks
        // after disconnect and that non-sequential htlc_ids result in a channel failure.
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5731,7 +5809,8 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5760,7 +5839,8 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5789,7 +5869,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
        //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions:     MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5819,7 +5900,8 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
 fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5858,7 +5940,8 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -5898,7 +5981,8 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
        //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::supported(), InitFeatures::supported());
@@ -5942,7 +6026,8 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda
        //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
        //    * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::supported(), InitFeatures::supported());
@@ -6019,7 +6104,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        // We can have at most two valid local commitment tx, so both cases must be covered, and both txs must be checked to get them all as
 	// HTLC could have been removed from latest local commitment tx but still valid until we get remote RAA
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan =create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -6111,7 +6197,8 @@ fn test_no_failure_dust_htlc_local_commitment() {
 	// Transaction filters for failing back dust HTLCs based on local commitment txn infos have been
 	// prone to error, so we test here that a dummy transaction doesn't fail them.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -6167,7 +6254,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
        // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
 
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
@@ -6300,7 +6388,8 @@ fn test_upfront_shutdown_script() {
        config.peer_channel_config_limits.force_announced_channel_preference = false;
        config.channel_options.commit_upfront_shutdown_pubkey = false;
        let user_cfgs = [None, Some(config), None];
-       let node_cfgs = create_node_cfgs(3);
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
        let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
@@ -6397,13 +6486,14 @@ fn test_user_configurable_csv_delay() {
        let mut high_their_to_self_config = UserConfig::default();
        high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
        let user_cfgs = [Some(high_their_to_self_config.clone()), None];
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
        let keys_manager: Arc<KeysInterface<ChanKeySigner = EnforcingChannelKeys>> = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
-       if let Err(error) = Channel::new_outbound(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
+       if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
                match error {
                        APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
                        _ => panic!("Unexpected event"),
@@ -6414,7 +6504,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::supported(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
+       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::supported(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
                match error {
                        ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
                        _ => panic!("Unexpected event"),
@@ -6440,7 +6530,7 @@ fn test_user_configurable_csv_delay() {
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::supported(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &high_their_to_self_config) {
+       if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::supported(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &high_their_to_self_config) {
                match error {
                        ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
                        _ => panic!("Unexpected event"),
@@ -6454,9 +6544,13 @@ fn test_data_loss_protect() {
 	// * we don't broadcast our Local Commitment Tx in case we have fallen behind
 	// * we close the channel if we detect the other side has fallen behind
        // * we are able to claim our own outputs thanks to remote my_current_per_commitment_point
+       let keys_manager;
+       let fee_estimator;
+       let tx_broadcaster;
        let monitor;
        let node_state_0;
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -6477,24 +6571,25 @@ fn test_data_loss_protect() {
        let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
        let mut chan_monitor = <(Sha256dHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
        let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
-       let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), broadcasted_txn: Mutex::new(HashSet::new())});
-       let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
-       monitor = test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone());
+       tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), broadcasted_txn: Mutex::new(HashSet::new())};
+       fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+       keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger));
+       monitor = test_utils::TestChannelMonitor::new(chain_monitor.clone(), &tx_broadcaster, logger.clone(), &fee_estimator);
        node_state_0 = {
                let mut channel_monitors = HashMap::new();
                channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chan_monitor);
-               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
-                       keys_manager: Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger))),
-                       fee_estimator: feeest.clone(),
+               <(Sha256dHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChannelMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+                       keys_manager: &keys_manager,
+                       fee_estimator: &fee_estimator,
                        monitor: &monitor,
                        logger: Arc::clone(&logger),
-                       tx_broadcaster,
+                       tx_broadcaster: &tx_broadcaster,
                        default_config: UserConfig::default(),
                        channel_monitors: &mut channel_monitors,
                }).unwrap().1
        };
        nodes[0].node = &node_state_0;
-       assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
+       assert!(monitor.add_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
        nodes[0].chan_monitor = &monitor;
        nodes[0].chain_monitor = chain_monitor;
 
@@ -6573,7 +6668,8 @@ fn test_check_htlc_underpaying() {
 	// sending a probe payment (i.e. less than expected value)
        // to B, B should refuse payment.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -6621,7 +6717,8 @@ fn test_announce_disable_channels() {
        // Create 2 channels between A and B. Disconnect B. Call timer_chan_freshness_every_min and check for generated
        // ChannelUpdate. Reconnect B, reestablish and check there is non-generated ChannelUpdate.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -6682,7 +6779,8 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        // In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
        // we're able to claim outputs on revoked commitment transaction before timelocks expiration
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -6783,7 +6881,8 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	// In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to be sure
        // we're able to claim outputs on revoked HTLC transactions before timelocks expiration
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -6938,7 +7037,8 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        // Provide preimage for one
        // Check aggregation
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -7045,7 +7145,8 @@ fn test_set_outpoints_partial_claiming() {
        // - remote party claim tx, new bump tx
        // - disconnect remote claiming tx, new bump
        // - disconnect tx, see no tx anymore
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
@@ -7144,7 +7245,8 @@ fn test_counterparty_raa_skip_no_crash() {
        // check simply that the channel is closed in response to such an RAA, but don't check whether
        // we decide to punish our counterparty for revoking their funds (as we don't currently
        // implement that).
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
@@ -7165,7 +7267,8 @@ fn test_bump_txn_sanitize_tracking_maps() {
 	// Sanitizing pending_claim_request and claimable_outpoints used to be buggy,
 	// verify we clean them right after expiration of ANTI_REORG_DELAY.
 
-       let node_cfgs = create_node_cfgs(2);
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
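The hunks above apply the same mechanical substitution to every test in functional_tests.rs: build chanmon_cfgs first, then let create_node_cfgs borrow from it, presumably so per-node test infrastructure (broadcaster, fee estimator) is borrowed rather than owned. A minimal sketch of the resulting boilerplate, using only helpers that appear elsewhere in this patch; the test name is invented, and it assumes the functional_test_utils glob import that reorg_tests.rs below uses.

#[test]
fn example_two_node_setup() {
	// Shared per-node test infrastructure lives in chanmon_cfgs; node_cfgs
	// borrows from it rather than constructing its own.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	// One Option<UserConfig> per node; None leaves the defaults in place, as
	// the user_cfgs usage in test_upfront_shutdown_script above does.
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	// From here, tests open channels and exercise them as usual.
	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
}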
index 9a2a90fd4fd6e9444bf3a5cbcaa8f68b97be9407..4cb1b89cc53bced2e3003d7a16fdd77e1819e278 100644 (file)
@@ -27,8 +27,11 @@ mod onion_utils;
 mod wire;
 
 #[cfg(test)]
-#[macro_use] mod functional_test_utils;
+#[macro_use]
+pub(crate) mod functional_test_utils;
 #[cfg(test)]
 mod functional_tests;
 #[cfg(test)]
 mod chanmon_update_fail_tests;
+#[cfg(test)]
+mod reorg_tests;
index 44d08030062fb47b337b879901b6967a73776d58..60df3d80196698234146400260411ef5f4a70b2e 100644 (file)
@@ -47,14 +47,15 @@ pub struct MessageHandler<CM: Deref> where CM::Target: msgs::ChannelMessageHandl
 /// For efficiency, Clone should be relatively cheap for this type.
 ///
 /// You probably want to just extend an int and put a file descriptor in a struct and implement
-/// send_data. Note that if you are using a higher-level net library that may close() itself, be
-/// careful to ensure you don't have races whereby you might register a new connection with an fd
-/// the same as a yet-to-be-disconnect_event()-ed.
+/// send_data. Note that if you are using a higher-level net library that may call close() itself,
+/// be careful to ensure you don't have races whereby you might register a new connection with an
+/// fd which is the same as a previous one which has yet to be removed via
+/// PeerManager::socket_disconnected().
 pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
        /// Attempts to send some data from the given slice to the peer.
        ///
        /// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
-       /// Note that in the disconnected case, a disconnect_event must still fire and further write
+       /// Note that in the disconnected case, socket_disconnected must still fire and further write
        /// attempts may occur until that time.
        ///
        /// If the returned size is smaller than data.len(), a write_available event must
@@ -67,17 +68,18 @@ pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
        /// *not* imply that further read events should be paused.
        fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize;
        /// Disconnect the socket pointed to by this SocketDescriptor. Once this function returns, no
-       /// more calls to write_event, read_event or disconnect_event may be made with this descriptor.
-       /// No disconnect_event should be generated as a result of this call, though obviously races
-       /// may occur whereby disconnect_socket is called after a call to disconnect_event but prior to
-       /// that event completing.
+       /// more calls to write_buffer_space_avail, read_event or socket_disconnected may be made with
+       /// this descriptor. No socket_disconnected call should be generated as a result of this call,
+       /// though races may occur whereby disconnect_socket is called after a call to
+       /// socket_disconnected but prior to socket_disconnected returning.
        fn disconnect_socket(&mut self);
 }
 
 /// Error for PeerManager errors. If you get one of these, you must disconnect the socket and
-/// generate no further read/write_events for the descriptor, only triggering a single
-/// disconnect_event (unless it was provided in response to a new_*_connection event, in which case
-/// no such disconnect_event must be generated and the socket be silently disconencted).
+/// generate no further read_event/write_buffer_space_avail calls for the descriptor, only
+/// triggering a single socket_disconnected call (unless it was provided in response to a
+/// new_*_connection event, in which case no such socket_disconnected() must be called and the
+/// socket silently disconnected).
 pub struct PeerHandleError {
        /// Used to indicate that we probably can't make any future connections to this peer, implying
        /// we should go ahead and force-close any channels we have with it.
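For implementors updating to the renamed callbacks, the SocketDescriptor documentation above amounts to: wrap your fd in a small, hashable struct and implement send_data and disconnect_socket. A minimal sketch modeled on the buffer-backed FileDescriptor in the tests at the end of this patch; the struct name is invented, the import path is assumed, and real network I/O is elided.

use std::sync::{Arc, Mutex};
use lightning::ln::peer_handler::SocketDescriptor; // path assumed

// Illustrative only: identity and hashing are by fd, and writes are captured
// into a buffer rather than handed to a real socket.
#[derive(Clone)]
struct FdDescriptor {
	fd: u16,
	outbound_data: Arc<Mutex<Vec<u8>>>,
}
impl PartialEq for FdDescriptor {
	fn eq(&self, other: &Self) -> bool { self.fd == other.fd }
}
impl Eq for FdDescriptor {}
impl std::hash::Hash for FdDescriptor {
	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { self.fd.hash(hasher) }
}
impl SocketDescriptor for FdDescriptor {
	fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
		// Claim the whole slice was written; a real socket may accept less.
		self.outbound_data.lock().unwrap().extend_from_slice(data);
		data.len()
	}
	fn disconnect_socket(&mut self) {
		// Close the underlying socket here. Per the docs above, this must not
		// itself trigger a socket_disconnected() call back into the PeerManager.
	}
}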
@@ -160,7 +162,7 @@ fn _check_usize_is_32_or_64() {
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
 /// SimpleRefPeerManager is the more appropriate type. Defining these type aliases prevents
 /// issues such as overly long function definitions.
-pub type SimpleArcPeerManager<SD, M> = Arc<PeerManager<SD, SimpleArcChannelManager<M>>>;
+pub type SimpleArcPeerManager<SD, M, T, F> = Arc<PeerManager<SD, SimpleArcChannelManager<M, T, F>>>;
 
 /// SimpleRefPeerManager is a type alias for a PeerManager reference, and is the reference
 /// counterpart to the SimpleArcPeerManager type alias. Use this type by default when you don't
@@ -168,7 +170,7 @@ pub type SimpleArcPeerManager<SD, M> = Arc<PeerManager<SD, SimpleArcChannelManag
 /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
 /// helps with issues such as long function definitions.
-pub type SimpleRefPeerManager<'a, SD, M> = PeerManager<SD, SimpleRefChannelManager<'a, M>>;
+pub type SimpleRefPeerManager<'a, 'b, 'c, 'd, SD, M, T, F> = PeerManager<SD, SimpleRefChannelManager<'a, 'b, 'c, 'd, M, T, F>>;
 
 /// A PeerManager manages a set of peers, described by their SocketDescriptor and marshalls socket
 /// events into messages which it passes on to its MessageHandlers.
@@ -201,7 +203,7 @@ macro_rules! encode_msg {
 }
 
 /// Manages and reacts to connection events. You probably want to use file descriptors as PeerIds.
-/// PeerIds may repeat, but only after disconnect_event() has been called.
+/// PeerIds may repeat, but only after socket_disconnected() has been called.
 impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where CM::Target: msgs::ChannelMessageHandler {
        /// Constructs a new PeerManager with the given message handlers and node_id secret key
        /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
@@ -254,13 +256,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
        }
 
        /// Indicates a new outbound connection has been established to a node with the given node_id.
-       /// Note that if an Err is returned here you MUST NOT call disconnect_event for the new
+       /// Note that if an Err is returned here you MUST NOT call socket_disconnected for the new
        /// descriptor but must disconnect the connection immediately.
        ///
        /// Returns a small number of bytes to send to the remote node (currently always 50).
        ///
-       /// Panics if descriptor is duplicative with some other descriptor which has not yet has a
-       /// disconnect_event.
+       /// Panics if descriptor is duplicative with some other descriptor which has not yet had a
+       /// socket_disconnected().
        pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
                let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
                let res = peer_encryptor.get_act_one().to_vec();
@@ -294,11 +296,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
        ///
        /// May refuse the connection by returning an Err, but will never write bytes to the remote end
        /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
-       /// call disconnect_event for the new descriptor but must disconnect the connection
+       /// call socket_disconnected for the new descriptor but must disconnect the connection
        /// immediately.
        ///
-       /// Panics if descriptor is duplicative with some other descriptor which has not yet has a
-       /// disconnect_event.
+       /// Panics if descriptor is duplicative with some other descriptor which has not yet had
+       /// socket_disconnected called.
        pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
                let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
                let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
@@ -406,10 +408,11 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
        ///
        /// Will most likely call send_data on the descriptor passed in (or the descriptor handed into
        /// new_*\_connection) before returning. Thus, be very careful with reentrancy issues! The
-       /// invariants around calling write_event in case a write did not fully complete must still
-       /// hold - be ready to call write_event again if a write call generated here isn't sufficient!
-       /// Panics if the descriptor was not previously registered in a new_\*_connection event.
-       pub fn write_event(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
+       /// invariants around calling write_buffer_space_avail in case a write did not fully complete
+       /// must still hold - be ready to call write_buffer_space_avail again if a write call generated
+       /// here isn't sufficient! Panics if the descriptor was not previously registered in a
+       /// new_\*_connection event.
+       pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
                let mut peers = self.peers.lock().unwrap();
                match peers.peers.get_mut(descriptor) {
                        None => panic!("Descriptor for write_event is not already known to PeerManager"),
@@ -429,8 +432,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
        /// Thus, however, you almost certainly want to call process_events() after any read_event to
        /// generate send_data calls to handle responses.
        ///
-       /// If Ok(true) is returned, further read_events should not be triggered until a write_event on
-       /// this file descriptor has resume_read set (preventing DoS issues in the send buffer).
+       /// If Ok(true) is returned, further read_events should not be triggered until a send_data call
+       /// on this file descriptor has resume_read set (preventing DoS issues in the send buffer).
        ///
        /// Panics if the descriptor was not previously registered in a new_*_connection event.
        pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
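Outside of lightning-net-tokio, these renamed entry points are what a network driver calls. A rough sketch of the read path under the signatures documented above; the driver function, its error handling, and the import paths are assumptions, not part of this patch.

use std::ops::Deref;
use lightning::ln::msgs;
use lightning::ln::peer_handler::{PeerHandleError, PeerManager, SocketDescriptor};

// Hypothetical glue: feed bytes read off the wire, then let the handlers
// queue their responses via send_data.
fn drive_read<D: SocketDescriptor, CM: Deref>(peer_manager: &PeerManager<D, CM>, descriptor: &mut D, data: Vec<u8>)
		-> Result<bool, PeerHandleError> where CM::Target: msgs::ChannelMessageHandler {
	// Ok(true) asks us to pause further reads until a send_data call sets resume_read.
	let pause_read = peer_manager.read_event(descriptor, data)?;
	// Generate outbound messages in response to what was just read.
	peer_manager.process_events();
	Ok(pause_read)
}

// Elsewhere in the driver (sketch):
// - when the OS reports write buffer space: peer_manager.write_buffer_space_avail(&mut descriptor)?;
// - when the peer (and not this library) closes the socket: peer_manager.socket_disconnected(&descriptor);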
@@ -752,10 +755,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
 
                                                                                        // Unknown messages:
                                                                                        wire::Message::Unknown(msg_type) if msg_type.is_even() => {
+                                                                                               log_debug!(self, "Received unknown even message of type {}, disconnecting peer!", msg_type);
                                                                                                // Fail the channel if message is an even, unknown type as per BOLT #1.
                                                                                                return Err(PeerHandleError{ no_connection_possible: true });
                                                                                        },
-                                                                                       wire::Message::Unknown(_) => {},
+                                                                                       wire::Message::Unknown(msg_type) => {
+                                                                                               log_trace!(self, "Received unknown odd message of type {}, ignoring", msg_type);
+                                                                                       },
                                                                                }
                                                                        }
                                                                }
@@ -1036,11 +1042,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
 
        /// Indicates that the given socket descriptor's connection is now closed.
        ///
-       /// This must be called even if a PeerHandleError was given for a read_event or write_event,
-       /// but must NOT be called if a PeerHandleError was provided out of a new_\*\_connection event!
+       /// This must only be called if the socket has been disconnected by the peer or your own
+       /// decision to disconnect it and must NOT be called in any case where other parts of this
+       /// library (eg PeerHandleError, explicit disconnect_socket calls) instruct you to disconnect
+       /// the peer.
        ///
        /// Panics if the descriptor was not previously registered in a successful new_*_connection event.
-       pub fn disconnect_event(&self, descriptor: &Descriptor) {
+       pub fn socket_disconnected(&self, descriptor: &Descriptor) {
                self.disconnect_event_internal(descriptor, false);
        }
 
@@ -1073,34 +1081,48 @@ impl<Descriptor: SocketDescriptor, CM: Deref> PeerManager<Descriptor, CM> where
                        let peers_needing_send = &mut peers.peers_needing_send;
                        let node_id_to_descriptor = &mut peers.node_id_to_descriptor;
                        let peers = &mut peers.peers;
+                       let mut descriptors_needing_disconnect = Vec::new();
 
                        peers.retain(|descriptor, peer| {
-                               if peer.awaiting_pong == true {
+                               if peer.awaiting_pong {
                                        peers_needing_send.remove(descriptor);
+                                       descriptors_needing_disconnect.push(descriptor.clone());
                                        match peer.their_node_id {
                                                Some(node_id) => {
+                                                       log_trace!(self, "Disconnecting peer with id {} due to ping timeout", node_id);
                                                        node_id_to_descriptor.remove(&node_id);
-                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, true);
+                                                       self.message_handler.chan_handler.peer_disconnected(&node_id, false);
+                                               }
+                                               None => {
+                                                       // This can't actually happen as we should have hit
+                                                       // is_ready_for_encryption() previously on this same peer.
+                                                       unreachable!();
                                                },
-                                               None => {}
                                        }
+                                       return false;
+                               }
+
+                               if !peer.channel_encryptor.is_ready_for_encryption() {
+                                       // The peer needs to complete its handshake before we can exchange messages
+                                       return true;
                                }
 
                                let ping = msgs::Ping {
                                        ponglen: 0,
                                        byteslen: 64,
                                };
-                               peer.pending_outbound_buffer.push_back(encode_msg!(&ping));
+                               peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(&ping)));
+
                                let mut descriptor_clone = descriptor.clone();
                                self.do_attempt_write_data(&mut descriptor_clone, peer);
 
-                               if peer.awaiting_pong {
-                                       false // Drop the peer
-                               } else {
-                                       peer.awaiting_pong = true;
-                                       true
-                               }
+                               peer.awaiting_pong = true;
+                               true
                        });
+
+                       for mut descriptor in descriptors_needing_disconnect.drain(..) {
+                               descriptor.disconnect_socket();
+                       }
                }
        }
 }
@@ -1118,15 +1140,29 @@ mod tests {
 
        use rand::{thread_rng, Rng};
 
-       use std::sync::{Arc};
+       use std;
+       use std::sync::{Arc, Mutex};
 
-       #[derive(PartialEq, Eq, Clone, Hash)]
+       #[derive(Clone)]
        struct FileDescriptor {
                fd: u16,
+               outbound_data: Arc<Mutex<Vec<u8>>>,
+       }
+       impl PartialEq for FileDescriptor {
+               fn eq(&self, other: &Self) -> bool {
+                       self.fd == other.fd
+               }
+       }
+       impl Eq for FileDescriptor { }
+       impl std::hash::Hash for FileDescriptor {
+               fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
+                       self.fd.hash(hasher)
+               }
        }
 
        impl SocketDescriptor for FileDescriptor {
                fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
+                       self.outbound_data.lock().unwrap().extend_from_slice(data);
                        data.len()
                }
 
@@ -1167,10 +1203,14 @@ mod tests {
 
        fn establish_connection<'a>(peer_a: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler>, peer_b: &PeerManager<FileDescriptor, &'a test_utils::TestChannelMessageHandler>) {
                let secp_ctx = Secp256k1::new();
-               let their_id = PublicKey::from_secret_key(&secp_ctx, &peer_b.our_node_secret);
-               let fd = FileDescriptor { fd: 1};
-               peer_a.new_inbound_connection(fd.clone()).unwrap();
-               peer_a.peers.lock().unwrap().node_id_to_descriptor.insert(their_id, fd.clone());
+               let a_id = PublicKey::from_secret_key(&secp_ctx, &peer_a.our_node_secret);
+               let mut fd_a = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+               let mut fd_b = FileDescriptor { fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())) };
+               let initial_data = peer_b.new_outbound_connection(a_id, fd_b.clone()).unwrap();
+               peer_a.new_inbound_connection(fd_a.clone()).unwrap();
+               assert_eq!(peer_a.read_event(&mut fd_a, initial_data).unwrap(), false);
+               assert_eq!(peer_b.read_event(&mut fd_b, fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+               assert_eq!(peer_a.read_event(&mut fd_a, fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
        }
 
        #[test]
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
new file mode 100644 (file)
index 0000000..a1051f5
--- /dev/null
@@ -0,0 +1,162 @@
+//! Further functional tests which test blockchain reorganizations.
+
+use ln::channelmonitor::ANTI_REORG_DELAY;
+use ln::features::InitFeatures;
+use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin::blockdata::block::{Block, BlockHeader};
+
+use std::default::Default;
+
+use ln::functional_test_utils::*;
+
+fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
+       // Our on-chain HTLC-claim learning has a few properties worth testing:
+       //  * If an upstream HTLC is claimed with a preimage (both against our own commitment
+	//    transaction and our counterparty's), we claim it backwards immediately.
+       //  * If an upstream HTLC is claimed with a timeout, we delay ANTI_REORG_DELAY before failing
+       //    it backwards to ensure our counterparty can't claim with a preimage in a reorg.
+       //
+       // Here we test both properties in any combination based on the two bools passed in as
+       // arguments.
+       //
+       // If local_commitment is set, we first broadcast a local commitment containing an offered HTLC
+       // and an HTLC-Timeout tx, otherwise we broadcast a remote commitment containing a received
+       // HTLC and a local HTLC-Timeout tx spending it.
+       //
+       // We then either allow these transactions to confirm (if !claim) or we wait until one block
+       // before they otherwise would and reorg them out, confirming an HTLC-Success tx instead.
+       let chanmon_cfgs = create_chanmon_cfgs(3);
+       let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+       let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+       create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+       let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
+
+       let (our_payment_preimage, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+       // Provide preimage to node 2 by claiming payment
+       nodes[2].node.claim_funds(our_payment_preimage, 1000000);
+       check_added_monitors!(nodes[2], 1);
+       get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+       let mut headers = Vec::new();
+       let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+       let claim_txn = if local_commitment {
+               // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
+               let node_1_commitment_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+               assert_eq!(node_1_commitment_txn.len(), 2); // 1 local commitment tx, 1 Outbound HTLC-Timeout
+               assert_eq!(node_1_commitment_txn[0].output.len(), 2); // to-self and Offered HTLC (to-remote/to-node-3 is dust)
+               check_spends!(node_1_commitment_txn[0], chan_2.3);
+               check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0].clone());
+
+               // Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
+               nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+		check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+               let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
+               assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
+               check_spends!(node_2_commitment_txn[1], chan_2.3);
+               check_spends!(node_2_commitment_txn[2], node_2_commitment_txn[1].clone());
+               check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);
+
+               // Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
+               nodes[1].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+
+		// ...but return node 1's commitment tx (and node 2's claim of its offered HTLC) in case claim is set and we're preparing to reorg
+               vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
+       } else {
+               // Broadcast node 2 commitment txn
+               let node_2_commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+               assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
+               assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
+               check_spends!(node_2_commitment_txn[0], chan_2.3);
+               check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0].clone());
+
+               // Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
+               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+               let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_1_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Timeout, ChannelManager: 1 local commitment tx, 1 Offered HTLC-Timeout
+               assert_eq!(node_1_commitment_txn[1].output.len(), 2); // to-local and Offered HTLC (to-remote is dust)
+               check_spends!(node_1_commitment_txn[1], chan_2.3);
+               check_spends!(node_1_commitment_txn[2], node_1_commitment_txn[1].clone());
+               check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);
+
+               // Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
+               nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+               // ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
+               node_2_commitment_txn
+       };
+	check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+       headers.push(header.clone());
+       // At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +
+       // ANTI_REORG_DELAY - 1 will give us a confirmation count of ANTI_REORG_DELAY - 1.
+       for i in CHAN_CONFIRM_DEPTH + 2..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1 {
+               header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[1].block_notifier.block_connected_checked(&header, i, &vec![], &[0; 0]);
+               headers.push(header.clone());
+       }
+       check_added_monitors!(nodes[1], 0);
+       assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+
+       if claim {
+               // Now reorg back to CHAN_CONFIRM_DEPTH and confirm node 2's broadcasted transactions:
+               for (height, header) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(headers.iter()).rev() {
+                       nodes[1].block_notifier.block_disconnected(&header, height);
+               }
+
+               header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[1].block_notifier.block_connected(&Block { header, txdata: claim_txn }, CHAN_CONFIRM_DEPTH + 1);
+
+               // ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_htlcs_updated when we
+               // probe it for events, so we probe non-message events here (which should still end up empty):
+               assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+       } else {
+               // Confirm the timeout tx and check that we fail the HTLC backwards
+               header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[1].block_notifier.block_connected_checked(&header, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY, &vec![], &[0; 0]);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+       }
+
+       check_added_monitors!(nodes[1], 1);
+       // Which should result in an immediate claim/fail of the HTLC:
+       let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+       if claim {
+               assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1);
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fulfill_htlcs[0]);
+       } else {
+               assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
+       }
+       commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
+       if claim {
+               expect_payment_sent!(nodes[0], our_payment_preimage);
+       } else {
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               if let MessageSendEvent::PaymentFailureNetworkUpdate { update: HTLCFailChannelUpdate::ChannelClosed { ref is_permanent, .. } } = events[0] {
+                       assert!(is_permanent);
+               } else { panic!("Unexpected event!"); }
+               expect_payment_failed!(nodes[0], our_payment_hash, false);
+       }
+}
+
+#[test]
+fn test_onchain_htlc_claim_reorg_local_commitment() {
+       do_test_onchain_htlc_reorg(true, true);
+}
+#[test]
+fn test_onchain_htlc_timeout_delay_local_commitment() {
+       do_test_onchain_htlc_reorg(true, false);
+}
+#[test]
+fn test_onchain_htlc_claim_reorg_remote_commitment() {
+       do_test_onchain_htlc_reorg(false, true);
+}
+#[test]
+fn test_onchain_htlc_timeout_delay_remote_commitment() {
+       do_test_onchain_htlc_reorg(false, false);
+}
index 27f837775ef19ad4c605a6e3b0659d158ed47164..35bf00bd85614e6c9cabd560be8e73005b7386cf 100644 (file)
@@ -33,7 +33,7 @@ pub enum APIError {
                /// A human-readable error message
                err: &'static str
        },
-       /// An attempt to call add_update_monitor returned an Err (ie you did this!), causing the
+       /// An attempt to call add/update_monitor returned an Err (ie you did this!), causing the
        /// attempted action to fail.
        MonitorUpdateFailed,
 }
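// A hypothetical handling sketch (not part of the patch) for the MonitorUpdateFailed
// variant documented above. `res` and the helper name are illustrative assumptions for
// a Result<(), APIError> returned by a ChannelManager call such as sending a payment.
use lightning::util::errors::APIError;

fn handle_api_result(res: Result<(), APIError>) {
	match res {
		Ok(()) => {},
		Err(APIError::MonitorUpdateFailed) => {
			// Our own ManyChannelMonitor add/update call returned an Err, so the attempted
			// action was aborted; retry once the monitor update has actually been persisted.
		},
		Err(_other) => {
			// The remaining variants carry human-readable error messages describing what went wrong.
		},
	}
}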
index eda6fc7ee4eab89068a0b7f7a5848c5a3d2e2810..d165f200adbe5616fd914e4fb647097fd40035a3 100644 (file)
@@ -4,18 +4,12 @@
 //! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
 //! future, as well as generate and broadcast funding transactions, handle payment preimages and a
 //! few other things.
-//!
-//! Note that many events are handled for you by PeerHandler, so in the common design of having a
-//! PeerManager which marshalls messages to ChannelManager and Router you only need to call
-//! process_events on the PeerHandler and then get_and_clear_pending_events and handle the events
-//! that bubble up to the surface. If, however, you do not have a PeerHandler managing a
-//! ChannelManager you need to handle all of the events which may be generated.
-//TODO: We need better separation of event types ^
 
 use ln::msgs;
 use ln::channelmanager::{PaymentPreimage, PaymentHash};
 use chain::transaction::OutPoint;
 use chain::keysinterface::SpendableOutputDescriptor;
+use util::ser::{Writeable, Writer, MaybeReadable, Readable};
 
 use bitcoin::blockdata::script::Script;
 
@@ -24,6 +18,10 @@ use secp256k1::key::PublicKey;
 use std::time::Duration;
 
 /// An Event which you should probably take some action in response to.
+///
+/// Note that while Writeable and MaybeReadable are implemented for Event, you probably shouldn't
+/// use them directly, as they don't round-trip exactly (for example, FundingGenerationReady is
+/// never written, as it makes no sense to respond to it after reconnecting to peers).
 pub enum Event {
        /// Used to indicate that the client should generate a funding transaction with the given
        /// parameters and then call ChannelManager::funding_transaction_generated.
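// A minimal round-trip sketch for the note above (illustrative, not from this patch).
// It assumes a small Vec-backed Writer like the ones the fuzz/test code defines and uses
// the MaybeReadable impl added in the next hunk: a written PaymentSent event reads back
// as Some(..), while FundingGenerationReady is written as a bare type byte and reads
// back as None.
use lightning::ln::channelmanager::PaymentPreimage;
use lightning::util::events::Event;
use lightning::util::ser::{MaybeReadable, Writeable, Writer};

struct VecWriter(Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, _size: usize) {}
}

fn roundtrip(event: &Event) -> Option<Event> {
	let mut w = VecWriter(Vec::new());
	event.write(&mut w).unwrap();
	let read: Option<Event> = MaybeReadable::read(&mut ::std::io::Cursor::new(&w.0)).unwrap();
	read
}

// Usage: roundtrip(&Event::PaymentSent { payment_preimage: PaymentPreimage([42; 32]) })
// returns Some(Event::PaymentSent { .. }), whereas a FundingGenerationReady event comes
// back as None.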
@@ -108,6 +106,91 @@ pub enum Event {
        },
 }
 
+impl Writeable for Event {
+       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+               match self {
+                       &Event::FundingGenerationReady { .. } => {
+                               0u8.write(writer)?;
+                               // We never write out FundingGenerationReady events as, upon disconnection, peers
+                               // drop any channels which have not yet exchanged funding_signed.
+                       },
+                       &Event::FundingBroadcastSafe { ref funding_txo, ref user_channel_id } => {
+                               1u8.write(writer)?;
+                               funding_txo.write(writer)?;
+                               user_channel_id.write(writer)?;
+                       },
+                       &Event::PaymentReceived { ref payment_hash, ref amt } => {
+                               2u8.write(writer)?;
+                               payment_hash.write(writer)?;
+                               amt.write(writer)?;
+                       },
+                       &Event::PaymentSent { ref payment_preimage } => {
+                               3u8.write(writer)?;
+                               payment_preimage.write(writer)?;
+                       },
+                       &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest,
+                               #[cfg(test)]
+                               ref error_code,
+                       } => {
+                               4u8.write(writer)?;
+                               payment_hash.write(writer)?;
+                               rejected_by_dest.write(writer)?;
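+                               // The error_code field exists (and is serialized) only in test builds.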
+                               #[cfg(test)]
+                               error_code.write(writer)?;
+                       },
+                       &Event::PendingHTLCsForwardable { time_forwardable: _ } => {
+                               5u8.write(writer)?;
+                               // We don't write the time_forwardable out at all, as we presume that, by the time the
+                               // user deserializes us, at least that much time has elapsed.
+                       },
+                       &Event::SpendableOutputs { ref outputs } => {
+                               6u8.write(writer)?;
+                               (outputs.len() as u64).write(writer)?;
+                               for output in outputs.iter() {
+                                       output.write(writer)?;
+                               }
+                       },
+               }
+               Ok(())
+       }
+}
+impl<R: ::std::io::Read> MaybeReadable<R> for Event {
+       fn read(reader: &mut R) -> Result<Option<Self>, msgs::DecodeError> {
+               match Readable::read(reader)? {
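+                       // 0 was written for a FundingGenerationReady event, which we deliberately never reconstruct: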
+                       0u8 => Ok(None),
+                       1u8 => Ok(Some(Event::FundingBroadcastSafe {
+                                       funding_txo: Readable::read(reader)?,
+                                       user_channel_id: Readable::read(reader)?,
+                               })),
+                       2u8 => Ok(Some(Event::PaymentReceived {
+                                       payment_hash: Readable::read(reader)?,
+                                       amt: Readable::read(reader)?,
+                               })),
+                       3u8 => Ok(Some(Event::PaymentSent {
+                                       payment_preimage: Readable::read(reader)?,
+                               })),
+                       4u8 => Ok(Some(Event::PaymentFailed {
+                                       payment_hash: Readable::read(reader)?,
+                                       rejected_by_dest: Readable::read(reader)?,
+                                       #[cfg(test)]
+                                       error_code: Readable::read(reader)?,
+                               })),
+                       5u8 => Ok(Some(Event::PendingHTLCsForwardable {
+                                       time_forwardable: Duration::from_secs(0)
+                               })),
+                       6u8 => {
+                               let outputs_len: u64 = Readable::read(reader)?;
+                               let mut outputs = Vec::new();
+                               for _ in 0..outputs_len {
+                                       outputs.push(Readable::read(reader)?);
+                               }
+                               Ok(Some(Event::SpendableOutputs { outputs }))
+                       },
+                       _ => Err(msgs::DecodeError::InvalidValue)
+               }
+       }
+}
+
 /// An event generated by ChannelManager which indicates a message should be sent to a peer (or
 /// broadcast to most peers).
 /// These events are handled by PeerManager::process_events if you are using a PeerManager.
index 1b98e341fad6f35e94e555528865ed1cee508b5a..96936fe95416fd06a1061e323dbbe1952cb09c09 100644 (file)
@@ -11,7 +11,9 @@ use std::cmp;
 use secp256k1::Signature;
 use secp256k1::key::{PublicKey, SecretKey};
 use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::OutPoint;
+use bitcoin::blockdata::transaction::{OutPoint, Transaction, TxOut};
+use bitcoin::consensus;
+use bitcoin::consensus::Encodable;
 use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use std::marker::Sized;
 use ln::msgs::DecodeError;
@@ -189,6 +191,15 @@ pub trait ReadableArgs<R, P>
        fn read(reader: &mut R, params: P) -> Result<Self, DecodeError>;
 }
 
+/// A trait that various rust-lightning types implement, allowing them to (maybe) be read in from a Read
+pub trait MaybeReadable<R>
+       where Self: Sized,
+             R: Read
+{
+       /// Reads a Self in from the given Read
+       fn read(reader: &mut R) -> Result<Option<Self>, DecodeError>;
+}
+
 pub(crate) struct U48(pub u64);
 impl Writeable for U48 {
        #[inline]
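// A hypothetical implementation sketch (not part of the patch) of the MaybeReadable trait
// added in the hunk above; the type and its wire format are illustrative only. Returning
// Ok(None) signals a value that was understood but yields nothing to hand back, as the
// Event implementation in events.rs does for its 0 marker byte.
use std::io::Read;
use lightning::ln::msgs::DecodeError;
use lightning::util::ser::MaybeReadable;

/// Illustrative type: a single marker byte where 0 means "nothing to return".
struct MaybeNote(u8);

impl<R: Read> MaybeReadable<R> for MaybeNote {
	fn read(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let mut byte = [0u8; 1];
		reader.read_exact(&mut byte).map_err(|_| DecodeError::ShortRead)?;
		if byte[0] == 0 { Ok(None) } else { Ok(Some(MaybeNote(byte[0]))) }
	}
}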
@@ -625,6 +636,33 @@ impl<R: Read> Readable<R> for OutPoint {
        }
 }
 
+macro_rules! impl_consensus_ser {
+       ($bitcoin_type: ty) => {
+               impl Writeable for $bitcoin_type {
+                       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+                               match self.consensus_encode(WriterWriteAdaptor(writer)) {
+                                       Ok(_) => Ok(()),
+                                       Err(consensus::encode::Error::Io(e)) => Err(e),
+                                       Err(_) => panic!("We shouldn't get a consensus::encode::Error unless our Write generated an std::io::Error"),
+                               }
+                       }
+               }
+
+               impl<R: Read> Readable<R> for $bitcoin_type {
+                       fn read(r: &mut R) -> Result<Self, DecodeError> {
+                               match consensus::encode::Decodable::consensus_decode(r) {
+                                       Ok(t) => Ok(t),
+                                       Err(consensus::encode::Error::Io(ref e)) if e.kind() == ::std::io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
+                                       Err(consensus::encode::Error::Io(e)) => Err(DecodeError::Io(e)),
+                                       Err(_) => Err(DecodeError::InvalidValue),
+                               }
+                       }
+               }
+       }
+}
+impl_consensus_ser!(Transaction);
+impl_consensus_ser!(TxOut);
+
 impl<R: Read, T: Readable<R>> Readable<R> for Mutex<T> {
        fn read(r: &mut R) -> Result<Self, DecodeError> {
                let t: T = Readable::read(r)?;
index b343ac35379f17195e785d70f6f6dea9dab5829e..9af8d5cf2f3d92db21ab81a775ae4b1b03ad8860 100644 (file)
@@ -10,8 +10,7 @@ use ln::channelmonitor::HTLCUpdate;
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::events;
 use util::logger::{Logger, Level, Record};
-use util::ser::ReadableArgs;
-use util::ser::Writer;
+use util::ser::{Readable, ReadableArgs, Writer, Writeable};
 
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::script::Script;
@@ -45,37 +44,65 @@ impl chaininterface::FeeEstimator for TestFeeEstimator {
        }
 }
 
-pub struct TestChannelMonitor {
+pub struct TestChannelMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
-       pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys>,
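+       // Latest (funding OutPoint, update_id) seen for each channel, keyed by channel_id.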
+       pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
+       pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator>,
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
 }
-impl TestChannelMonitor {
-       pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
+impl<'a> TestChannelMonitor<'a> {
+       pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: Arc<Logger>, fee_estimator: &'a TestFeeEstimator) -> Self {
                Self {
                        added_monitors: Mutex::new(Vec::new()),
+                       latest_monitor_update_id: Mutex::new(HashMap::new()),
                        simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
                        update_ret: Mutex::new(Ok(())),
                }
        }
 }
-impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor {
-       fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+impl<'a> channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor<'a> {
+       fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
                let mut w = TestVecWriter(Vec::new());
                monitor.write_for_disk(&mut w).unwrap();
-               assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
-                               &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == monitor);
+               let new_monitor = <(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
+                               &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1;
+               assert!(new_monitor == monitor);
                w.0.clear();
                monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
-               self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
-               assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
+               self.added_monitors.lock().unwrap().push((funding_txo, monitor));
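+               // Hand the freshly-deserialized copy to the underlying monitor so the serialization path is exercised end-to-end.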
+               assert!(self.simple_monitor.add_monitor(funding_txo, new_monitor).is_ok());
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
-               return self.simple_monitor.fetch_pending_htlc_updated();
+       fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+               // Every monitor update should survive a serialization round-trip
+               let mut w = TestVecWriter(Vec::new());
+               update.write(&mut w).unwrap();
+               assert!(channelmonitor::ChannelMonitorUpdate::read(
+                               &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+
+               self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
+               assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok());
+               // At every point where we get a monitor update, we should be able to send a useful monitor
+               // to a watchtower and disk...
+               let monitors = self.simple_monitor.monitors.lock().unwrap();
+               let monitor = monitors.get(&funding_txo).unwrap();
+               w.0.clear();
+               monitor.write_for_disk(&mut w).unwrap();
+               let new_monitor = <(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
+                               &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1;
+               assert!(new_monitor == *monitor);
+               w.0.clear();
+               monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
+               self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
+               self.update_ret.lock().unwrap().clone()
+       }
+
+       fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+               return self.simple_monitor.get_and_clear_pending_htlcs_updated();
        }
 }