Merge pull request #551 from TheBlueMatt/2020-03-no-chan-mon
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 20 Mar 2020 00:22:11 +0000 (00:22 +0000)
committer GitHub <noreply@github.com>
Fri, 20 Mar 2020 00:22:11 +0000 (00:22 +0000)
Generate latest local commitment transactions via monitor avoiding Channel's copy

fuzz/src/chanmon_consistency.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/channelmonitor.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/reorg_tests.rs

index 7b8baec7169e7793cd9312b87cc77366e140a6e4..bcacae883d3d91fc5a9f7882c2859a07adc22e3e 100644 (file)
@@ -116,7 +116,7 @@ impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMon
                };
                let mut deserialized_monitor = <(Sha256d, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
                        read(&mut Cursor::new(&map_entry.get().1), Arc::clone(&self.logger)).unwrap().1;
-               deserialized_monitor.update_monitor(update.clone()).unwrap();
+               deserialized_monitor.update_monitor(update.clone(), &&TestBroadcaster {}).unwrap();
                let mut ser = VecWriter(Vec::new());
                deserialized_monitor.write_for_disk(&mut ser).unwrap();
                map_entry.insert((update.update_id, ser.0));
index 0983cf44e4952e12530b6b02303439dc6c6294c1..d43608f376b3417c0bee9a78aeead958a1c521ad 100644 (file)
@@ -31,7 +31,7 @@ fn test_simple_monitor_permanent_update_fail() {
 
        *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
        if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
-       check_added_monitors!(nodes[0], 1);
+       check_added_monitors!(nodes[0], 2);
 
        let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_1.len(), 2);
@@ -120,7 +120,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
        // ...and make sure we can force-close a frozen channel
        nodes[0].node.force_close_channel(&channel_id);
-       check_added_monitors!(nodes[0], 0);
+       check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
 
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
index a9565af9e06baf013a33384561e1bf1c12fc827a..374d74f6c18b578b8123e4f2344375164abff704 100644 (file)
@@ -1138,8 +1138,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        }
 
        /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
-       /// Ok(_) if debug assertions are turned on and preconditions are met.
+       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, we will
+       /// always return Ok(_) if debug assertions are turned on or preconditions are met.
+       ///
+       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
+       /// but then have a reorg which settles on an HTLC-failure on chain.
        fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
@@ -1167,6 +1170,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                } else {
                                                        log_warn!(self, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
                                                }
+                                               debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
                                                return Ok((None, None));
                                        },
                                        _ => {
@@ -1200,6 +1204,9 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                match pending_update {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
+                                                       // Make sure we don't leave latest_monitor_update_id incremented here:
+                                                       self.latest_monitor_update_id -= 1;
+                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
                                                        return Ok((None, None));
                                                }
                                        },
@@ -1208,6 +1215,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                        log_warn!(self, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
+                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                        return Ok((None, Some(monitor_update)));
                                                }
                                        },
@@ -1259,8 +1267,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        }
 
        /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
-       /// Ok(_) if debug assertions are turned on and preconditions are met.
+       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, we will
+       /// always return Ok(_) if debug assertions are turned on or preconditions are met.
+       ///
+       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
+       /// but then have a reorg which settles on an HTLC-failure on chain.
        pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -1277,6 +1288,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                match htlc.state {
                                        InboundHTLCState::Committed => {},
                                        InboundHTLCState::LocalRemoved(_) => {
+                                               debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
                                                return Ok(None);
                                        },
                                        _ => {
@@ -1297,11 +1309,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                match pending_update {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
+                                                       debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
                                                        return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
+                                                       debug_assert!(false, "Tried to fail an HTLC that was already failed");
                                                        return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
                                                }
                                        },
@@ -3760,7 +3774,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
        /// Also returns the list of payment_hashes for channels which we can safely fail backwards
        /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self) -> (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>) {
+       pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
                assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
 
                // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
@@ -3783,12 +3797,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.update_time_counter += 1;
-               if self.channel_monitor.is_some() {
-                       (self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
-               } else {
-                       // We aren't even signed funding yet, so can't broadcast anything
-                       (Vec::new(), dropped_outbound_htlcs)
-               }
+               self.latest_monitor_update_id += 1;
+               (self.funding_txo.clone(), ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+               }, dropped_outbound_htlcs)
        }
 }
 
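For illustration, the consumer side of the new force_shutdown() return shape — a sketch mirroring the finish_force_close_channel changes further below (error handling elided); Channel no longer broadcasts anything itself, the caller hands one final ChannelMonitorUpdate to the monitor, which broadcasts if should_broadcast was set:

	let (funding_txo_option, monitor_update, failed_htlcs) = chan.force_shutdown(true);
	// ...fail each entry in `failed_htlcs` backwards, then:
	if let Some(funding_txo) = funding_txo_option {
		// Ignoring the result is fine here: we are already force-closing, and
		// the required in-memory monitor copy broadcasts the latest local state.
		let _ = self.monitor.update_monitor(funding_txo, monitor_update);
	}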
index 307b41d37a81e1e3719379a2c959119833e23fe7..cde40ee323241cde60b14a400ab537e21477daa4 100644 (file)
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::features::{InitFeatures, NodeFeatures};
 use ln::router::Route;
 use ln::msgs;
@@ -152,7 +152,7 @@ pub struct PaymentHash(pub [u8;32]);
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentPreimage(pub [u8;32]);
 
-type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -502,8 +502,7 @@ macro_rules! break_chan_entry {
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        $channel_state.short_to_id.remove(&short_id);
                                }
-                               break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
-                       },
+                               break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) },
                        Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                }
        }
@@ -522,7 +521,7 @@ macro_rules! try_chan_entry {
                                if let Some(short_id) = chan.get_short_channel_id() {
                                        $channel_state.short_to_id.remove(&short_id);
                                }
-                               return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+                               return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
                        },
                        Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
                                log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
@@ -540,11 +539,7 @@ macro_rules! try_chan_entry {
                                                ChannelMonitorUpdateErr::TemporaryFailure => {},
                                        }
                                }
-                               let mut shutdown_res = chan.force_shutdown();
-                               if shutdown_res.0.len() >= 1 {
-                                       log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
-                               }
-                               shutdown_res.0.clear();
+                               let shutdown_res = chan.force_shutdown(false);
                                return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
                        }
                }
@@ -572,7 +567,7 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
                                res
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -820,14 +815,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
-               let (local_txn, mut failed_htlcs) = shutdown_res;
-               log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
+               let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+               log_trace!(self, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               for tx in local_txn {
-                       log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
-                       self.tx_broadcaster.broadcast_transaction(&tx);
+               if let Some(funding_txo) = funding_txo_option {
+                       // There isn't anything we can do if we get an update failure - we're already
+                       // force-closing. The monitor update on the required in-memory copy should broadcast
+                       // the latest local state, which is the best we can do anyway. Thus, it is safe to
+                       // ignore the result here.
+                       let _ = self.monitor.update_monitor(funding_txo, monitor_update);
                }
        }
 
@@ -849,7 +847,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
                        }
                };
                log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
-               self.finish_force_close_channel(chan.force_shutdown());
+               self.finish_force_close_channel(chan.force_shutdown(true));
                if let Ok(update) = self.get_channel_update(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1268,7 +1266,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
                                Some(mut chan) => {
                                        (chan.get_outbound_funding_created(funding_txo)
                                                .map_err(|e| if let ChannelError::Close(msg) = e {
-                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+                                                       MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
                                                } else { unreachable!(); })
                                        , chan)
                                },
@@ -1288,7 +1286,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
                                ChannelMonitorUpdateErr::PermanentFailure => {
                                        {
                                                let mut channel_state = self.channel_state.lock().unwrap();
-                                               match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
+                                               match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id(), channel_state) {
                                                        Err(_) => { return; },
                                                        Ok(()) => unreachable!(),
                                                }
@@ -1518,7 +1516,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
                                                                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                                                                channel_state.short_to_id.remove(&short_id);
                                                                                        }
-                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
+                                                                                       Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
                                                                                },
                                                                                ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
@@ -2021,7 +2019,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
                                        // channel, not the temporary_channel_id. This is compatible with ourselves, but the
                                        // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
                                        // any messages referencing a previously-closed channel anyway.
-                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
                                },
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
@@ -2741,7 +2739,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                                                // It looks like our counterparty went on-chain. We go ahead and
                                                                // broadcast our latest local state as well here, just in case its
                                                                // some kind of SPV attack, though we expect these to be dropped.
-                                                               failed_channels.push(channel.force_shutdown());
+                                                               failed_channels.push(channel.force_shutdown(true));
                                                                if let Ok(update) = self.get_channel_update(&channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
@@ -2756,11 +2754,10 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                        if let Some(short_id) = channel.get_short_channel_id() {
                                                short_to_id.remove(&short_id);
                                        }
-                                       failed_channels.push(channel.force_shutdown());
                                        // If would_broadcast_at_height() is true, the channel_monitor will broadcast
                                        // the latest local tx for us, so we should skip that here (it doesn't really
                                        // hurt anything, but does make tests a bit simpler).
-                                       failed_channels.last_mut().unwrap().0 = Vec::new();
+                                       failed_channels.push(channel.force_shutdown(false));
                                        if let Ok(update) = self.get_channel_update(&channel) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
@@ -2804,7 +2801,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                        if let Some(short_id) = v.get_short_channel_id() {
                                                short_to_id.remove(&short_id);
                                        }
-                                       failed_channels.push(v.force_shutdown());
+                                       failed_channels.push(v.force_shutdown(true));
                                        if let Ok(update) = self.get_channel_update(&v) {
                                                pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
@@ -2992,7 +2989,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
                                                if let Some(short_id) = chan.get_short_channel_id() {
                                                        short_to_id.remove(&short_id);
                                                }
-                                               failed_channels.push(chan.force_shutdown());
+                                               failed_channels.push(chan.force_shutdown(true));
                                                if let Ok(update) = self.get_channel_update(&chan) {
                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                msg: update
@@ -3458,7 +3455,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
                let latest_block_height: u32 = Readable::read(reader)?;
                let last_block_hash: Sha256dHash = Readable::read(reader)?;
 
-               let mut closed_channels = Vec::new();
+               let mut failed_htlcs = Vec::new();
 
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
@@ -3477,9 +3474,9 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
                                                channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
                                                channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
                                                channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
-                                       let mut force_close_res = channel.force_shutdown();
-                                       force_close_res.0 = monitor.get_latest_local_commitment_txn();
-                                       closed_channels.push(force_close_res);
+                                       let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       failed_htlcs.append(&mut new_failed_htlcs);
+                                       monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
                                } else {
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_id.insert(short_channel_id, channel.channel_id());
@@ -3493,7 +3490,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 
                for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
                        if !funding_txo_set.contains(funding_txo) {
-                               closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+                               monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
                        }
                }
 
@@ -3563,12 +3560,13 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
                        default_configuration: args.default_config,
                };
 
-               for close_res in closed_channels.drain(..) {
-                       channel_manager.finish_force_close_channel(close_res);
-                       //TODO: Broadcast channel update for closed channels, but only after we've made a
-                       //connection or two.
+               for htlc_source in failed_htlcs.drain(..) {
+                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
 
+               //TODO: Broadcast channel update for closed channels, but only after we've made a
+               //connection or two.
+
                Ok((last_block_hash.clone(), channel_manager))
        }
 }
index 514a95d27db9339edc8b78989a0477695ae102ab..8c26139c4cba1fa750c8ba07e9637fcf16becf79 100644 (file)
@@ -124,9 +124,11 @@ pub enum ChannelMonitorUpdateErr {
        TemporaryFailure,
        /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
        /// different watchtower and cannot update with all watchtowers that were previously informed
-       /// of this channel). This will force-close the channel in question.
+       /// of this channel). This will force-close the channel in question (which will generate one
+       /// final ChannelMonitorUpdate that must be delivered to at least one ChannelMonitor copy).
        ///
-       /// Should also be used to indicate a failure to update the local copy of the channel monitor.
+       /// Should also be used to indicate a failure to update the local persisted copy of the channel
+       /// monitor.
        PermanentFailure,
 }
 
@@ -153,6 +155,13 @@ impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
 /// events to it, while also taking any add/update_monitor events and passing them to some remote
 /// server(s).
 ///
+/// In general, you must always have at least one local copy in memory, which must never fail to
+/// update (as it is responsible for broadcasting the latest state in case the channel is closed),
+/// and then persist it to various on-disk locations. If, for some reason, the in-memory copy fails
+/// to update (eg out-of-memory or some other condition), you must immediately shut down without
+/// taking any further action such as writing the current state to disk. This should likely be
+/// accomplished via panic!() or abort().
+///
 /// Note that any updates to a channel's monitor *must* be applied to each instance of the
 /// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
 /// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
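A minimal sketch of the update path this doc comment prescribes — the surrounding struct and the in_memory_copy/persist_monitor names are hypothetical, not part of this patch; only the ordering (in-memory copy first, abort on its failure, persist second) is what the comment requires:

	fn update_channel_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate)
		-> Result<(), ChannelMonitorUpdateErr>
	{
		// The in-memory copy must never fail to update; if it somehow does, shut
		// down immediately rather than persisting a state we could not apply.
		if self.in_memory_copy.lock().unwrap()
				.update_monitor(update.clone(), &self.broadcaster).is_err() {
			panic!("in-memory ChannelMonitor update failed; cannot continue safely");
		}
		// Only after the in-memory copy is updated do we persist; a failure to
		// persist the local copy maps to PermanentFailure per the enum above.
		self.persist_monitor(funding_txo, &update)
			.map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
	}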
@@ -313,7 +322,7 @@ impl<Key : Send + cmp::Eq + hash::Hash + 'static, ChanSigner: ChannelKeys, T: De
                match monitors.get_mut(&key) {
                        Some(orig_monitor) => {
                                log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor.key_storage));
-                               orig_monitor.update_monitor(update)
+                               orig_monitor.update_monitor(update, &self.broadcaster)
                        },
                        None => Err(MonitorUpdateError("No such monitor registered"))
                }
@@ -621,6 +630,13 @@ pub(super) enum ChannelMonitorUpdateStep {
        RescueRemoteCommitmentTXInfo {
                their_current_per_commitment_point: PublicKey,
        },
+       /// Used to indicate that no future updates will occur, and likely that the latest local
+       /// commitment transaction(s) should be broadcast, as the channel has been force-closed.
+       ChannelForceClosed {
+               /// If set to false, we shouldn't broadcast the latest local commitment transaction as we
+               /// think we've fallen behind!
+               should_broadcast: bool,
+       },
 }
 
 impl Writeable for ChannelMonitorUpdateStep {
@@ -662,6 +678,10 @@ impl Writeable for ChannelMonitorUpdateStep {
                                4u8.write(w)?;
                                their_current_per_commitment_point.write(w)?;
                        },
+                       &ChannelMonitorUpdateStep::ChannelForceClosed { ref should_broadcast } => {
+                               5u8.write(w)?;
+                               should_broadcast.write(w)?;
+                       },
                }
                Ok(())
        }
@@ -715,6 +735,11 @@ impl Readable for ChannelMonitorUpdateStep {
                                        their_current_per_commitment_point: Readable::read(r)?,
                                })
                        },
+                       5u8 => {
+                               Ok(ChannelMonitorUpdateStep::ChannelForceClosed {
+                                       should_broadcast: Readable::read(r)?
+                               })
+                       },
                        _ => Err(DecodeError::InvalidValue),
                }
        }
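As a sanity check of the new (de)serialization arms, a round-trip of the ChannelForceClosed variant would look roughly like this (test-style sketch, usable within this module since the enum is pub(super); assumes the crate's blanket Writer impl for std::io::Write so a Vec<u8> can be written to directly):

	let step = ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true };
	let mut buf = Vec::new();
	step.write(&mut buf).unwrap();   // writes the 5u8 tag, then the bool
	assert_eq!(buf[0], 5u8);
	match Readable::read(&mut ::std::io::Cursor::new(&buf)).unwrap() {
		ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => assert!(should_broadcast),
		_ => panic!("decoded wrong variant"),
	}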
@@ -1275,6 +1300,14 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
        }
 
+       pub(super) fn broadcast_latest_local_commitment_txn<B: Deref>(&mut self, broadcaster: &B)
+               where B::Target: BroadcasterInterface,
+       {
+               for tx in self.get_latest_local_commitment_txn().iter() {
+                       broadcaster.broadcast_transaction(tx);
+               }
+       }
+
        /// Used in Channel to cheat wrt the update_ids since it plays games, will be removed soon!
        pub(super) fn update_monitor_ooo(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
                for update in updates.updates.drain(..) {
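The broadcaster bound added below is deliberately minimal: all update_monitor() needs is the existing BroadcasterInterface trait. The fuzz target's TestBroadcaster referenced in the first hunk above is essentially just a no-op implementation of it (sketch):

	struct TestBroadcaster {}
	impl chaininterface::BroadcasterInterface for TestBroadcaster {
		// Drop transactions on the floor; the fuzz harness does not need
		// them to actually hit a chain backend.
		fn broadcast_transaction(&self, _tx: &Transaction) {}
	}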
@@ -1289,6 +1322,7 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                        self.provide_secret(idx, secret)?,
                                ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
                                        self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+                               ChannelMonitorUpdateStep::ChannelForceClosed { .. } => {},
                        }
                }
                self.latest_update_id = updates.update_id;
@@ -1299,7 +1333,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// itself.
        ///
        /// panics if the given update is not the next update by update_id.
-       pub fn update_monitor(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+       pub fn update_monitor<B: Deref>(&mut self, mut updates: ChannelMonitorUpdate, broadcaster: &B) -> Result<(), MonitorUpdateError>
+               where B::Target: BroadcasterInterface,
+       {
                if self.latest_update_id + 1 != updates.update_id {
                        panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
                }
@@ -1315,6 +1351,13 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                                        self.provide_secret(idx, secret)?,
                                ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
                                        self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+                               ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } => {
+                                       if should_broadcast {
+                                               self.broadcast_latest_local_commitment_txn(broadcaster);
+                                       } else {
+                                               log_error!(self, "You have a toxic local commitment transaction available in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take");
+                                       }
+                               }
                        }
                }
                self.latest_update_id = updates.update_id;
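Call sites now thread a broadcaster through. The double reference in the fuzz change above (&&TestBroadcaster {}) satisfies the B: Deref bound with B = &TestBroadcaster, whose Target implements BroadcasterInterface:

	deserialized_monitor.update_monitor(update.clone(), &&TestBroadcaster {}).unwrap();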
@@ -1929,6 +1972,9 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
        /// out-of-band the other node operator to coordinate with him if option is available to you.
        /// In any-case, choice is up to the user.
        pub fn get_latest_local_commitment_txn(&mut self) -> Vec<Transaction> {
+               // TODO: We should likely move all of the logic in here into OnChainTxHandler and unify it
+               // to ensure add_local_sig is only ever called once no matter what. This likely includes
+               // tracking state and panic!()ing if we get an update after force-closure/local-tx signing.
                log_trace!(self, "Getting signed latest local commitment transaction!");
                if let &mut Some(ref mut local_tx) = &mut self.current_local_signed_commitment_tx {
                        match self.key_storage {
@@ -2278,19 +2324,23 @@ impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
                        if let Some((source, payment_hash)) = payment_data {
                                let mut payment_preimage = PaymentPreimage([0; 32]);
                                if accepted_preimage_claim {
-                                       payment_preimage.0.copy_from_slice(&input.witness[3]);
-                                       self.pending_htlcs_updated.push(HTLCUpdate {
-                                               source,
-                                               payment_preimage: Some(payment_preimage),
-                                               payment_hash
-                                       });
+                                       if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+                                               payment_preimage.0.copy_from_slice(&input.witness[3]);
+                                               self.pending_htlcs_updated.push(HTLCUpdate {
+                                                       source,
+                                                       payment_preimage: Some(payment_preimage),
+                                                       payment_hash
+                                               });
+                                       }
                                } else if offered_preimage_claim {
-                                       payment_preimage.0.copy_from_slice(&input.witness[1]);
-                                       self.pending_htlcs_updated.push(HTLCUpdate {
-                                               source,
-                                               payment_preimage: Some(payment_preimage),
-                                               payment_hash
-                                       });
+                                       if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
+                                               payment_preimage.0.copy_from_slice(&input.witness[1]);
+                                               self.pending_htlcs_updated.push(HTLCUpdate {
+                                                       source,
+                                                       payment_preimage: Some(payment_preimage),
+                                                       payment_hash
+                                               });
+                                       }
                                } else {
                                        log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
                                        match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
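The two guarded branches above differ only in which witness element carries the preimage; an equivalent shape of the new dedup logic, for clarity (sketch, same names as the code above):

	let witness_idx = if accepted_preimage_claim { 3 } else { 1 };
	if !self.pending_htlcs_updated.iter().any(|update| update.source == source) {
		payment_preimage.0.copy_from_slice(&input.witness[witness_idx]);
		self.pending_htlcs_updated.push(HTLCUpdate {
			source,
			payment_preimage: Some(payment_preimage),
			payment_hash
		});
	}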
index bd8394d14729ec77bc9c91e129b112b2f5199ffe..2da764b300b8bc2425aa7ff252fbba3749cdb7e9 100644 (file)
@@ -131,14 +131,16 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
                        // same set of outputs to watch for on chain as we have now. Note that if we write
                        // tests that fully close channels and remove the monitors at some point this may break.
                        let feeest = test_utils::TestFeeEstimator { sat_per_kw: 253 };
-                       let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
                        let mut deserialized_monitors = Vec::new();
-                       for (_, old_monitor) in old_monitors.iter() {
-                               let mut w = test_utils::TestVecWriter(Vec::new());
-                               old_monitor.write_for_disk(&mut w).unwrap();
-                               let (_, deserialized_monitor) = <(Sha256d, ChannelMonitor<EnforcingChannelKeys>)>::read(
-                                       &mut ::std::io::Cursor::new(&w.0), Arc::clone(&self.logger) as Arc<Logger>).unwrap();
-                               deserialized_monitors.push(deserialized_monitor);
+                       {
+                               let old_monitors = self.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                               for (_, old_monitor) in old_monitors.iter() {
+                                       let mut w = test_utils::TestVecWriter(Vec::new());
+                                       old_monitor.write_for_disk(&mut w).unwrap();
+                                       let (_, deserialized_monitor) = <(Sha256d, ChannelMonitor<EnforcingChannelKeys>)>::read(
+                                               &mut ::std::io::Cursor::new(&w.0), Arc::clone(&self.logger) as Arc<Logger>).unwrap();
+                                       deserialized_monitors.push(deserialized_monitor);
+                               }
                        }
 
                        // Before using all the new monitors to check the watch outpoints, use the full set of
@@ -255,6 +257,22 @@ macro_rules! get_feerate {
        }
 }
 
+macro_rules! get_local_commitment_txn {
+       ($node: expr, $channel_id: expr) => {
+               {
+                       let mut monitors = $node.chan_monitor.simple_monitor.monitors.lock().unwrap();
+                       let mut commitment_txn = None;
+                       for (funding_txo, monitor) in monitors.iter_mut() {
+                               if funding_txo.to_channel_id() == $channel_id {
+                                       commitment_txn = Some(monitor.get_latest_local_commitment_txn());
+                                       break;
+                               }
+                       }
+                       commitment_txn.unwrap()
+               }
+       }
+}
+
 pub fn create_funding_transaction<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
        let chan_id = *node.network_chan_count.borrow();
 
index bd4244091c7a21ccae4e405627d51e889011821b..e00e036db53478279dcc8abf4e79af660f519d62 100644 (file)
@@ -580,16 +580,15 @@ fn test_update_fee_that_funder_cannot_afford() {
        //Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate of 260 set above.
        //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
        {
-               let mut chan_lock = nodes[1].node.channel_state.lock().unwrap();
-               let chan = chan_lock.by_id.get_mut(&channel_id).unwrap();
+               let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
 
                //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs
-               let num_htlcs = chan.channel_monitor().get_latest_local_commitment_txn()[0].output.len() - 2;
+               let num_htlcs = commitment_tx.output.len() - 2;
                let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
-               let mut actual_fee = chan.channel_monitor().get_latest_local_commitment_txn()[0].output.iter().fold(0, |acc, output| acc + output.value);
+               let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value);
                actual_fee = channel_value - actual_fee;
                assert_eq!(total_fee, actual_fee);
-       } //drop the mutex
+       }
 
        //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially
        //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down)
@@ -605,9 +604,8 @@ fn test_update_fee_that_funder_cannot_afford() {
        //Should produce and error.
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
-
-       //clear the message we could not handle
-       nodes[1].node.get_and_clear_pending_msg_events();
+       check_added_monitors!(nodes[1], 1);
+       check_closed_broadcast!(nodes[1], true);
 }
 
 #[test]
@@ -1141,6 +1139,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                // checks it, but in this case nodes[0] didn't ever get a chance to receive a
                // closing_signed so we do it ourselves
                check_closed_broadcast!(nodes[0], false);
+               check_added_monitors!(nodes[0], 1);
        }
 
        assert!(nodes[0].node.list_channels().is_empty());
@@ -1475,7 +1474,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
        check_added_monitors!(nodes[0], 1);
 
        // Broadcast node 1 commitment txn
-       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
 
        assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
        let mut has_both_htlcs = 0; // check htlcs match ones committed
@@ -1490,6 +1489,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
 
        let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
+       check_added_monitors!(nodes[0], 1);
 
        // Check we only broadcast 1 timeout tx
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
@@ -1670,6 +1670,7 @@ fn do_channel_reserve_test(test_recv: bool) {
                        assert_eq!(nodes[1].node.list_channels().len(), 1);
                        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
                        assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value");
+                       check_added_monitors!(nodes[1], 1);
                        return;
                }
        }
@@ -1959,10 +1960,12 @@ fn channel_monitor_network_test() {
 
        // Simple case with no pending HTLCs:
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+       check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+               check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
        }
        get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -1974,10 +1977,12 @@ fn channel_monitor_network_test() {
 
        // Simple case of one pending HTLC to HTLC-Timeout
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
+       check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+               check_added_monitors!(nodes[2], 1);
                test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
        }
        get_announce_close_broadcast_events(&nodes, 1, 2);
@@ -2007,6 +2012,7 @@ fn channel_monitor_network_test() {
        // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
        nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+       check_added_monitors!(nodes[2], 1);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
@@ -2017,6 +2023,7 @@ fn channel_monitor_network_test() {
 
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[3].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
+               check_added_monitors!(nodes[3], 1);
 
                check_preimage_claim(&nodes[3], &node_txn);
        }
@@ -2043,6 +2050,7 @@ fn channel_monitor_network_test() {
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
+               check_added_monitors!(nodes[3], 1);
 
                // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
                {
@@ -2067,6 +2075,7 @@ fn channel_monitor_network_test() {
                        nodes[4].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
 
+               check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
 
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
@@ -2101,7 +2110,7 @@ fn test_justice_tx() {
        // A pending HTLC which will be revoked:
        let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        // Get the will-be-revoked local txn from nodes[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter_mut().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2);
        assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
@@ -2124,12 +2133,14 @@ fn test_justice_tx() {
                        node_txn.swap_remove(0);
                        node_txn.truncate(1);
                }
+               check_added_monitors!(nodes[1], 1);
                test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
 
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                // Verify broadcast of revoked HTLC-timeout
                let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               check_added_monitors!(nodes[0], 1);
                // Broadcast revoked HTLC-timeout on node 1
                nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
                test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2150,7 +2161,7 @@ fn test_justice_tx() {
        // A pending HTLC which will be revoked:
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        // Get the will-be-revoked local txn from B
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter_mut().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2);
        assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
@@ -2168,11 +2179,13 @@ fn test_justice_tx() {
                        check_spends!(node_txn[0], revoked_local_txn[0]);
                        node_txn.swap_remove(0);
                }
+               check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
 
                nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
                let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
                header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               check_added_monitors!(nodes[1], 1);
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
                test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
        }
@@ -2191,7 +2204,7 @@ fn revoked_output_claim() {
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
        // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(revoked_local_txn.len(), 1);
        // Only output is the full channel value back to nodes[0]:
        assert_eq!(revoked_local_txn[0].output.len(), 1);
@@ -2201,6 +2214,7 @@ fn revoked_output_claim() {
        // Inform nodes[1] that nodes[0] broadcast a stale tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2); // ChannelMonitor: justice tx against revoked to_local output, ChannelManager: local commitment tx
 
@@ -2210,6 +2224,7 @@ fn revoked_output_claim() {
        // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        get_announce_close_broadcast_events(&nodes, 0, 1);
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -2230,7 +2245,7 @@ fn claim_htlc_outputs_shared_tx() {
        let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 
        // Get the will-be-revoked local txn from node[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
@@ -2245,7 +2260,9 @@ fn claim_htlc_outputs_shared_tx() {
        {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               check_added_monitors!(nodes[0], 1);
                nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+               check_added_monitors!(nodes[1], 1);
                connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 
                let events = nodes[1].node.get_and_clear_pending_events();
@@ -2306,7 +2323,7 @@ fn claim_htlc_outputs_single_tx() {
        let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 
        // Get the will-be-revoked local txn from node[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
 
        //Revoke the old state
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1, 3_000_000);
@@ -2314,7 +2331,9 @@ fn claim_htlc_outputs_single_tx() {
        {
                let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+               check_added_monitors!(nodes[0], 1);
                nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+               check_added_monitors!(nodes[1], 1);
                connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
 
                let events = nodes[1].node.get_and_clear_pending_events();
@@ -2415,7 +2434,7 @@ fn test_htlc_on_chain_success() {
 
        // Broadcast legit commitment tx from C on B's chain
        // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        assert_eq!(commitment_tx.len(), 1);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(our_payment_preimage, 3_000_000);
@@ -2429,6 +2448,7 @@ fn test_htlc_on_chain_success() {
 
        nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
+       check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 7);
        assert_eq!(node_txn[0], node_txn[3]);
@@ -2447,6 +2467,12 @@ fn test_htlc_on_chain_success() {
 
        // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
        nodes[1].block_notifier.block_connected(&Block { header, txdata: node_txn}, 1);
+       {
+               let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+               assert_eq!(added_monitors.len(), 1);
+               assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
+               added_monitors.clear();
+       }
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        {
                let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
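
The inlined block above is the check_added_monitors! pattern expanded by hand so the test can additionally assert which funding txid the recorded update was for. For reference, a sketch of the macro itself, assuming the test monitor logs each add/update into an added_monitors Vec of (OutPoint, monitor) pairs as the inline block suggests:

macro_rules! check_added_monitors {
	($node: expr, $count: expr) => {{
		// Count the monitor updates recorded since the last check, then
		// drain the log so subsequent checks start from zero.
		let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), $count);
		added_monitors.clear();
	}}
}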
@@ -2515,10 +2541,11 @@ fn test_htlc_on_chain_success() {
 
        // Broadcast legit commitment tx from A on B's chain
        // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        check_spends!(commitment_tx[0], chan_1.3);
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 1 (HTLC-Success)
        assert_eq!(node_txn.len(), 4);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -2537,6 +2564,7 @@ fn test_htlc_on_chain_success() {
        // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        let mut first_claimed = false;
@@ -2583,7 +2611,7 @@ fn test_htlc_on_chain_timeout() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 
        // Broadcast legit commitment tx from C on B's chain
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
@@ -2604,6 +2632,7 @@ fn test_htlc_on_chain_timeout() {
        };
        nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
+       check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan_2.3);
@@ -2635,7 +2664,7 @@ fn test_htlc_on_chain_timeout() {
 
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![timeout_tx]}, 1);
        connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
-       check_added_monitors!(nodes[1], 0);
+       check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false);
 
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -2656,11 +2685,12 @@ fn test_htlc_on_chain_timeout() {
        assert_eq!(node_txn.len(), 0);
 
        // Broadcast legit commitment tx from B on A's chain
-       let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[1], chan_1.2);
        check_spends!(commitment_tx[0], chan_1.3);
 
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 3);
        check_spends!(node_txn[0], commitment_tx[0]);
@@ -2687,7 +2717,7 @@ fn test_simple_commitment_revoked_fail_backward() {
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
        // Get the will-be-revoked local txn from nodes[2]
-       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, 3_000_000);
 
@@ -2696,7 +2726,7 @@ fn test_simple_commitment_revoked_fail_backward() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
-       check_added_monitors!(nodes[1], 0);
+       check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false);
 
        expect_pending_htlcs_forwardable!(nodes[1]);
@@ -2758,7 +2788,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
        // Get the will-be-revoked local txn from nodes[2]
-       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, if no_to_remote { 10_000 } else { 3_000_000});
@@ -2851,6 +2881,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+       check_added_monitors!(nodes[1], 1);
        connect_blocks(&nodes[1].block_notifier, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
 
        let events = nodes[1].node.get_and_clear_pending_events();
@@ -2970,6 +3001,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -2977,6 +3009,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]}, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        // Duplicate the block_connected call since this may happen due to other listeners
        // registering new transactions
@@ -3028,6 +3061,7 @@ fn test_force_close_fail_back() {
 
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
        check_closed_broadcast!(nodes[2], false);
+       check_added_monitors!(nodes[2], 1);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
@@ -3042,6 +3076,7 @@ fn test_force_close_fail_back() {
 
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
        {
@@ -3087,6 +3122,7 @@ fn test_unconf_chan() {
                height -= 1;
        }
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
        let channel_state = nodes[0].node.channel_state.lock().unwrap();
        assert_eq!(channel_state.by_id.len(), 0);
        assert_eq!(channel_state.short_to_id.len(), 0);
@@ -4005,6 +4041,7 @@ fn test_claim_sizeable_push_msat() {
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::supported(), InitFeatures::supported());
        nodes[1].node.force_close_channel(&chan.2);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
        check_spends!(node_txn[0], chan.3);
@@ -4029,6 +4066,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::supported(), InitFeatures::supported());
        nodes[0].node.force_close_channel(&chan.2);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@ -4038,6 +4076,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
        let spend_txn = check_spendable_outputs!(nodes[1], 1);
        assert_eq!(spend_txn.len(), 2);
        assert_eq!(spend_txn[0], spend_txn[1]);
@@ -4056,7 +4095,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, InitFeatures::supported(), InitFeatures::supported());
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
@@ -4064,6 +4103,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
        let  header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        let spend_txn = check_spendable_outputs!(nodes[1], 1);
@@ -4085,7 +4125,7 @@ fn test_static_spendable_outputs_preimage_tx() {
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(commitment_tx[0].input.len(), 1);
        assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -4094,6 +4134,7 @@ fn test_static_spendable_outputs_preimage_tx() {
        assert!(nodes[1].node.claim_funds(payment_preimage, 3_000_000));
        check_added_monitors!(nodes[1], 1);
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+       check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4129,7 +4170,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter_mut().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -4138,6 +4179,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        let  header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 2);
@@ -4160,7 +4202,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -4170,6 +4212,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        // A will generate HTLC-Timeout from revoked commitment tx
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(revoked_htlc_txn.len(), 3);
@@ -4182,6 +4225,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        // B will generate justice tx from A's revoked commitment/HTLC tx
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 4); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-timeout, adjusted justice tx, ChannelManager: local commitment tx
@@ -4211,7 +4255,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -4221,6 +4265,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        // B will generate HTLC-Success from revoked commitment tx
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 
        assert_eq!(revoked_htlc_txn.len(), 3);
@@ -4232,6 +4277,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        // A will generate justice tx from B's revoked commitment/HTLC tx
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 3); // ChannelMonitor: justice tx on revoked commitment, justice tx on revoked HTLC-success, ChannelManager: local commitment tx
@@ -4272,7 +4318,7 @@ fn test_onchain_to_onchain_claim() {
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
        check_spends!(commitment_tx[0], chan_2.3);
        nodes[2].node.claim_funds(payment_preimage, 3_000_000);
        check_added_monitors!(nodes[2], 1);
@@ -4284,6 +4330,7 @@ fn test_onchain_to_onchain_claim() {
 
        nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2], false);
+       check_added_monitors!(nodes[2], 1);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        assert_eq!(c_txn.len(), 4);
@@ -4314,6 +4361,7 @@ fn test_onchain_to_onchain_claim() {
                assert_ne!(b_txn[2].lock_time, 0); // Timeout tx
                b_txn.clear();
        }
+       check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
@@ -4331,7 +4379,7 @@ fn test_onchain_to_onchain_claim() {
                _ => panic!("Unexpected event"),
        };
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
@@ -4344,6 +4392,7 @@ fn test_onchain_to_onchain_claim() {
        assert_eq!(b_txn[0].lock_time, 0); // Success tx
 
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -4362,13 +4411,14 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        *nodes[0].network_payment_count.borrow_mut() -= 1;
        assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash);
 
-       let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
        assert_eq!(commitment_txn[0].input.len(), 1);
        check_spends!(commitment_txn[0], chan_2.3);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        let htlc_timeout_tx;
        { // Extract one of the two HTLC-Timeout transactions
@@ -4390,7 +4440,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 
        nodes[2].node.claim_funds(our_payment_preimage, 900_000);
        nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
-       check_added_monitors!(nodes[2], 2);
+       check_added_monitors!(nodes[2], 3);
        let events = nodes[2].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4479,7 +4529,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
-       let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3);
 
@@ -4488,6 +4538,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        check_added_monitors!(nodes[1], 1);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+       check_added_monitors!(nodes[1], 1);
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        match events[0] {
                MessageSendEvent::UpdateHTLCs { .. } => {},
@@ -4536,7 +4587,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000, 500_000);
        send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000, 500_000);
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn()[0].output.len(), 2);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
        // 0th HTLC:
@@ -4573,8 +4624,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Double-check that six of the new HTLCs were added
        // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
        // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn().len(), 1);
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn()[0].output.len(), 8);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
+       assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
 
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
@@ -4605,7 +4656,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
        commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
 
-       let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
 
        expect_pending_htlcs_forwardable!(nodes[3]);
        check_added_monitors!(nodes[3], 1);
@@ -4633,7 +4684,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        //
        // Alternatively, we may broadcast the previous commitment transaction, which should only
        // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
-       let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        if announce_latest {
@@ -4644,7 +4695,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        connect_blocks(&nodes[2].block_notifier, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
        check_closed_broadcast!(nodes[2], false);
        expect_pending_htlcs_forwardable!(nodes[2]);
-       check_added_monitors!(nodes[2], 2);
+       check_added_monitors!(nodes[2], 3);
 
        let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
        assert_eq!(cs_msgs.len(), 2);
@@ -4773,7 +4824,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
 
        route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
-       let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3);
 
@@ -4781,6 +4832,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn[0].input.len(), 1);
@@ -4862,6 +4914,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
        }
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 }
 
 fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
@@ -4890,6 +4943,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        }
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 }
 
 fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
@@ -4933,6 +4987,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
        if !check_revoke_no_close {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], false);
+               check_added_monitors!(nodes[0], 1);
        } else {
                let events = nodes[0].node.get_and_clear_pending_events();
                assert_eq!(events.len(), 1);
@@ -5558,6 +5613,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
        check_closed_broadcast!(nodes[1], true).unwrap();
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5682,6 +5738,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote side tried to send less than our minimum HTLC value");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5707,6 +5764,7 @@ fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote HTLC add would put them over their reserve value");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5752,6 +5810,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to push more than our max accepted HTLCs");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5773,6 +5832,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote HTLC add would put them over our max HTLC value");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5794,6 +5854,7 @@ fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5839,6 +5900,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
        assert_eq!(err_msg.data, "Remote skipped HTLC ID");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -5869,6 +5931,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -5899,6 +5962,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -5930,6 +5994,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -5970,6 +6035,7 @@ fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -6010,9 +6076,9 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Remote tried to fulfill HTLC with an incorrect preimage");
+       check_added_monitors!(nodes[0], 1);
 }
 
-
 #[test]
 fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
        //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
@@ -6055,6 +6121,7 @@ fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_messag
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
        assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -6154,7 +6221,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        // Cache one local commitment tx as previous
-       let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
 
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
@@ -6168,7 +6235,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        check_added_monitors!(nodes[0], 1);
 
        // Cache one local commitment tx as latest
-       let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
 
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -6194,12 +6261,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_prev_commitment_tx[0].clone()]}, 1);
        }
 
-       let events = nodes[0].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-               _ => panic!("Unexpected event"),
-       }
+       check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
        connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 1, true,  header.bitcoin_hash());
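
This hunk (and several below) collapses the hand-rolled match on BroadcastChannelUpdate, plus an optional HandleError, into check_closed_broadcast!. Inferring from its call sites in this diff, the macro plausibly looks something like the following, returning Option<ErrorMessage> so callers can assert on the error's data field:

macro_rules! check_closed_broadcast {
	($node: expr, $with_error_msg: expr) => {{
		let events = $node.node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), if $with_error_msg { 2 } else { 1 });
		match events[0] {
			// The channel update announcing the channel as closed
			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
			_ => panic!("Unexpected event"),
		}
		if $with_error_msg {
			// Hand the error message back so the caller can check its data
			match events[1] {
				MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } => Some(msg.clone()),
				_ => panic!("Unexpected event"),
			}
		} else { None }
	}}
}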
@@ -6301,8 +6364,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let (_payment_preimage_1, dust_hash) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        let (_payment_preimage_2, non_dust_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
-       let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
+       let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
 
        // We revoked bs_commitment_tx
        if revoked {
@@ -6315,12 +6378,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![as_commitment_tx[0].clone()]}, 1);
-               let events = nodes[0].node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               match events[0] {
-                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-                       _ => panic!("Unexpected event"),
-               }
+               check_closed_broadcast!(nodes[0], false);
+               check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
                let parent_hash  = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
@@ -6350,13 +6409,9 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        } else {
                // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, also fail the non-dust HTLC
                nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![bs_commitment_tx[0].clone()]}, 1);
+               check_closed_broadcast!(nodes[0], false);
+               check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-               let events = nodes[0].node.get_and_clear_pending_msg_events();
-               assert_eq!(events.len(), 1);
-               match events[0] {
-                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-                       _ => panic!("Unexpected event"),
-               }
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
                let parent_hash  = connect_blocks(&nodes[0].block_notifier, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
                let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
@@ -6437,20 +6492,8 @@ fn test_upfront_shutdown_script() {
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
        // Test that we enforce upfront_scriptpubkey: if a different one is provided at closing, we disconnect the peer
        nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
-       let events = nodes[2].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 2);
-       match events[0] {
-               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-               _ => panic!("Unexpected event"),
-       }
-       if let MessageSendEvent::HandleError { ref action, .. } = events[1] {
-               match action {
-                       &ErrorAction::SendErrorMessage { ref msg } => {
-                               assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
-                       },
-                       _ => { assert!(false); }
-               }
-       } else { assert!(false); }
+       assert_eq!(check_closed_broadcast!(nodes[2], true).unwrap().data, "Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
+       check_added_monitors!(nodes[2], 1);
 
        // We test that if a peer commits upfront to a script and it doesn't change at closing, we sign
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
@@ -6642,7 +6685,7 @@ fn test_data_loss_protect() {
 
        // Check that we update the monitor after learning of per_commitment_point from B
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
-       check_added_monitors!(nodes[0], 1);
+       check_added_monitors!(nodes[0], 2);
 
        {
                let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
@@ -6669,22 +6712,9 @@ fn test_data_loss_protect() {
 
        // Check that we close the channel upon detecting A has fallen behind
        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-       let events = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 2);
-       match events[0] {
-               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
-               _ => panic!("Unexpected event"),
-       }
-       match events [1] {
-               MessageSendEvent::HandleError { ref action, .. } => {
-                       match action {
-                               &ErrorAction::SendErrorMessage { ref msg } => {
-                                       assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
-                               _ => panic!("Unexpected event!"),
-                       }
-               },
-               _ => panic!("Unexpected event"),
-       }
+       assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
+       check_added_monitors!(nodes[1], 1);
+
 
        // Check A is able to claim to_remote output
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
@@ -6825,7 +6855,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 3000000, 30).unwrap();
        send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
 
-       let revoked_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2);
        // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
        assert_eq!(revoked_txn[0].output.len(), 4);
        assert_eq!(revoked_txn[0].input.len(), 1);
@@ -6846,6 +6876,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: header_114, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_txn[0].clone()] }, 115);
+       check_added_monitors!(nodes[1], 1);
 
        // One or more justice tx should have been broadcast, check it
        let penalty_1;
@@ -6927,7 +6958,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3_000_000).0;
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
 
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
@@ -6938,6 +6969,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
        check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
 
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(revoked_htlc_txn.len(), 6);
@@ -7025,6 +7057,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
                node_txn.clear();
        }
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
 }
 
 #[test]
@@ -7046,7 +7079,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
 
        // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
-       let remote_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let remote_txn = get_local_commitment_txn!(nodes[0], chan.2);
        assert_eq!(remote_txn[0].output.len(), 4);
        assert_eq!(remote_txn[0].input.len(), 1);
        assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
@@ -7055,7 +7088,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        nodes[1].node.claim_funds(payment_preimage, 3_000_000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
-       check_added_monitors!(nodes[1], 1);
+       check_added_monitors!(nodes[1], 2);
 
        // One or more claim tx should have been broadcast, check it
        let timeout;
@@ -7154,7 +7187,7 @@ fn test_set_outpoints_partial_claiming() {
        let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
 
        // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLCs
-       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(remote_txn.len(), 3);
        assert_eq!(remote_txn[0].output.len(), 4);
        assert_eq!(remote_txn[0].input.len(), 1);
@@ -7174,6 +7207,8 @@ fn test_set_outpoints_partial_claiming() {
        // Connect blocks on node A commitment transaction
        let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
+       check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
        // Verify node A broadcast tx claiming both HTLCs
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7186,10 +7221,11 @@ fn test_set_outpoints_partial_claiming() {
                assert_eq!(node_txn[0].input.len(), 2);
                node_txn.clear();
        }
-       nodes[0].node.get_and_clear_pending_msg_events();
 
        // Connect blocks on node B
        connect_blocks(&nodes[1].block_notifier, 135, 0, false, Default::default());
+       check_closed_broadcast!(nodes[1], false);
+       check_added_monitors!(nodes[1], 1);
        // Verify node B broadcast 2 HTLC-timeout txn
        let partial_claim_tx = {
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -7200,7 +7236,6 @@ fn test_set_outpoints_partial_claiming() {
                assert_eq!(node_txn[2].input.len(), 1);
                node_txn[1].clone()
        };
-       nodes[1].node.get_and_clear_pending_msg_events();
 
        // Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
        let header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
@@ -7259,6 +7294,7 @@ fn test_counterparty_raa_skip_no_crash() {
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
                &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
        assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
+       check_added_monitors!(nodes[1], 1);
 }
 
 #[test]
@@ -7276,7 +7312,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
 
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
@@ -7288,6 +7324,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_128, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].block_notifier.block_connected(&Block { header: header_129, txdata: vec![revoked_local_txn[0].clone()] }, 129);
        check_closed_broadcast!(nodes[0], false);
+       check_added_monitors!(nodes[0], 1);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 4); //ChannelMonitor: justice txn * 3, ChannelManager: local commitment tx
index 2a4f79a9cc6ff08319b1f71ebf04a052016159eb..bfc0f56698103b7cb7d54f9b19f74dfe0355f27b 100644 (file)
@@ -47,7 +47,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
        let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        let claim_txn = if local_commitment {
                // Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
-               let node_1_commitment_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+               let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
                assert_eq!(node_1_commitment_txn.len(), 2); // 1 local commitment tx, 1 Outbound HTLC-Timeout
                assert_eq!(node_1_commitment_txn[0].output.len(), 2); // to-self and Offered HTLC (to-remote/to-node-3 is dust)
                check_spends!(node_1_commitment_txn[0], chan_2.3);
@@ -55,6 +55,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 
                // Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
                nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+               check_added_monitors!(nodes[2], 1);
                check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
                let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
@@ -70,7 +71,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
        } else {
                // Broadcast node 2 commitment txn
-               let node_2_commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+               let node_2_commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
                assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
                assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
                check_spends!(node_2_commitment_txn[0], chan_2.3);
@@ -90,6 +91,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
                // ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
                node_2_commitment_txn
        };
+       check_added_monitors!(nodes[1], 1);
        check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
        headers.push(header.clone());
        // At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +