git.bitcoin.ninja Git - rust-lightning/commitdiff
Drop duplicative current-local-tx storage in channel.
author Matt Corallo <git@bluematt.me>
Fri, 13 Dec 2019 03:42:08 +0000 (22:42 -0500)
committer Matt Corallo <git@bluematt.me>
Tue, 24 Dec 2019 17:14:09 +0000 (12:14 -0500)
We now have current-local-tx broadcast ability in channel monitors
directly (for ChannelManager deserialization), so we can just use
that instead of always having the Channel store signed ready-to-go
copies of the latest local commitment transaction.

This is also nice since ChannelMonitor is live and can, e.g.,
broadcast HTLC-Success transactions immediately: they will be
generated at broadcast time instead of in advance.

Finally, this lets us clean up a tiny bit in Channel.
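
The test-side hunks below all follow the same pattern: the cached
Channel::last_local_commitment_txn field is gone, and callers go through the
monitor instead. As a rough illustration (not part of the patch), a minimal
sketch of the new access pattern, assuming it sits inside the ln module where
Channel is visible; the helper name and generic bound are illustrative, while
channel_monitor() (which now returns a reference) and
get_latest_local_commitment_txn() come straight from the hunks below.

use bitcoin::blockdata::transaction::Transaction;
use chain::keysinterface::ChannelKeys;

// Illustrative only: fetch the signed latest local commitment transaction
// (plus any HTLC transactions the monitor builds at broadcast time) from the
// ChannelMonitor instead of the removed Channel::last_local_commitment_txn.
fn latest_local_commitment_txn<ChanSigner: ChannelKeys>(chan: &Channel<ChanSigner>) -> Vec<Transaction> {
    // channel_monitor() panics if called before funding is created, and now
    // hands back &ChannelMonitor rather than a clone.
    chan.channel_monitor().get_latest_local_commitment_txn()
}

Because the monitor may return HTLC-Success/Timeout transactions alongside the
commitment transaction itself, several broadcast-count assertions in the
functional tests change accordingly.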

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/channelmonitor.rs
lightning/src/ln/functional_tests.rs

lightning/src/ln/channel.rs
index f1330fb92029e22b9be5e53a99f29c2c9725df2f..889fec05c8f84bfb522222bfeb712bdd40cd2f7a 100644 (file)
@@ -4,7 +4,7 @@ use bitcoin::blockdata::transaction::{TxIn, TxOut, Transaction, SigHashType};
 use bitcoin::blockdata::opcodes;
 use bitcoin::util::hash::BitcoinHash;
 use bitcoin::util::bip143;
-use bitcoin::consensus::encode::{self, Encodable, Decodable};
+use bitcoin::consensus::encode;
 
 use bitcoin_hashes::{Hash, HashEngine};
 use bitcoin_hashes::sha256::Hash as Sha256;
@@ -25,7 +25,7 @@ use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
 use chain::transaction::OutPoint;
 use chain::keysinterface::{ChannelKeys, KeysInterface};
 use util::transaction_utils;
-use util::ser::{Readable, ReadableArgs, Writeable, Writer, WriterWriteAdaptor};
+use util::ser::{Readable, ReadableArgs, Writeable, Writer};
 use util::logger::{Logger, LogHolder};
 use util::errors::APIError;
 use util::config::{UserConfig,ChannelConfig};
@@ -297,12 +297,6 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
        /// Max to_local and to_remote outputs in a remote-generated commitment transaction
        max_commitment_tx_output_remote: ::std::sync::Mutex<(u64, u64)>,
 
-       #[cfg(test)]
-       // Used in ChannelManager's tests to send a revoked transaction
-       pub last_local_commitment_txn: Vec<Transaction>,
-       #[cfg(not(test))]
-       last_local_commitment_txn: Vec<Transaction>,
-
        last_sent_closing_fee: Option<(u64, u64, Signature)>, // (feerate, fee, our_sig)
 
        /// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
@@ -498,8 +492,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        #[cfg(debug_assertions)]
                        max_commitment_tx_output_remote: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 
-                       last_local_commitment_txn: Vec::new(),
-
                        last_sent_closing_fee: None,
 
                        funding_tx_confirmed_in: None,
@@ -716,8 +708,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        #[cfg(debug_assertions)]
                        max_commitment_tx_output_remote: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 
-                       last_local_commitment_txn: Vec::new(),
-
                        last_sent_closing_fee: None,
 
                        funding_tx_confirmed_in: None,
@@ -1185,8 +1175,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                Ok((htlc_redeemscript, self.secp_ctx.sign(&sighash, &our_htlc_key), is_local_tx))
        }
 
+       #[cfg(test)]
        /// Signs a transaction created by build_htlc_transaction. If the transaction is an
        /// HTLC-Success transaction (ie htlc.offered is false), preimage must be set!
+       /// TODO: Make this a chan_utils, use it in channelmonitor and tests, cause its unused now
        fn sign_htlc_transaction(&self, tx: &mut Transaction, their_sig: &Signature, preimage: &Option<PaymentPreimage>, htlc: &HTLCOutputInCommitment, keys: &TxCreationKeys) -> Result<Signature, ChannelError> {
                if tx.input.len() != 1 {
                        panic!("Tried to sign HTLC transaction that had input count != 1!");
@@ -1556,7 +1548,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                // Now that we're past error-generating stuff, update our local state:
 
                self.channel_monitor.provide_latest_remote_commitment_tx_info(&remote_initial_commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
-               self.last_local_commitment_txn = vec![local_initial_commitment_tx.clone()];
                self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx, local_keys, self.feerate_per_kw, Vec::new());
                self.channel_state = ChannelState::FundingSent as u32;
                self.channel_id = funding_txo.to_channel_id();
@@ -1594,8 +1585,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                secp_check!(self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey.unwrap()), "Invalid funding_signed signature from peer");
 
                self.sign_commitment_transaction(&mut local_initial_commitment_tx, &msg.signature);
-               self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys, self.feerate_per_kw, Vec::new());
-               self.last_local_commitment_txn = vec![local_initial_commitment_tx];
+               self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx, local_keys, self.feerate_per_kw, Vec::new());
                self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32));
                self.cur_local_commitment_transaction_number -= 1;
 
@@ -1859,25 +1849,17 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        return Err(ChannelError::Close("Got wrong number of HTLC signatures from remote"));
                }
 
-               let mut new_local_commitment_txn = Vec::with_capacity(local_commitment_tx.1 + 1);
                self.sign_commitment_transaction(&mut local_commitment_tx.0, &msg.signature);
-               new_local_commitment_txn.push(local_commitment_tx.0.clone());
 
                let mut htlcs_and_sigs = Vec::with_capacity(local_commitment_tx.2.len());
                for (idx, (htlc, source)) in local_commitment_tx.2.drain(..).enumerate() {
                        if let Some(_) = htlc.transaction_output_index {
-                               let mut htlc_tx = self.build_htlc_transaction(&local_commitment_txid, &htlc, true, &local_keys, feerate_per_kw);
+                               let htlc_tx = self.build_htlc_transaction(&local_commitment_txid, &htlc, true, &local_keys, feerate_per_kw);
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &local_keys);
                                log_trace!(self, "Checking HTLC tx signature {} by key {} against tx {} with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(local_keys.b_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), encode::serialize_hex(&htlc_redeemscript));
                                let htlc_sighash = hash_to_message!(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]);
                                secp_check!(self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key), "Invalid HTLC tx signature from peer");
-                               let htlc_sig = if htlc.offered {
-                                       let htlc_sig = self.sign_htlc_transaction(&mut htlc_tx, &msg.htlc_signatures[idx], &None, &htlc, &local_keys)?;
-                                       new_local_commitment_txn.push(htlc_tx);
-                                       htlc_sig
-                               } else {
-                                       self.create_htlc_tx_signature(&htlc_tx, &htlc, &local_keys)?.1
-                               };
+                               let htlc_sig = self.create_htlc_tx_signature(&htlc_tx, &htlc, &local_keys)?.1;
                                htlcs_and_sigs.push((htlc, Some((msg.htlc_signatures[idx], htlc_sig)), source));
                        } else {
                                htlcs_and_sigs.push((htlc, None, source));
@@ -1923,7 +1905,6 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                }
 
                self.cur_local_commitment_transaction_number -= 1;
-               self.last_local_commitment_txn = new_local_commitment_txn;
                // Note that if we need_our_commitment & !AwaitingRemoteRevoke we'll call
                // send_commitment_no_status_check() next which will reset this to RAAFirst.
                self.resend_order = RAACommitmentOrder::CommitmentFirst;
@@ -2881,11 +2862,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        }
 
        /// May only be called after funding has been initiated (ie is_funding_initiated() is true)
-       pub fn channel_monitor(&self) -> ChannelMonitor {
+       pub fn channel_monitor(&self) -> &ChannelMonitor {
                if self.channel_state < ChannelState::FundingCreated as u32 {
                        panic!("Can't get a channel monitor until funding has been created");
                }
-               self.channel_monitor.clone()
+               &self.channel_monitor
        }
 
        /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
@@ -3681,9 +3662,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
                self.channel_update_count += 1;
-               let mut res = Vec::new();
-               mem::swap(&mut res, &mut self.last_local_commitment_txn);
-               (res, dropped_outbound_htlcs)
+               (self.channel_monitor.get_latest_local_commitment_txn(), dropped_outbound_htlcs)
        }
 }
 
@@ -3873,16 +3852,6 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
                self.channel_update_count.write(writer)?;
                self.feerate_per_kw.write(writer)?;
 
-               (self.last_local_commitment_txn.len() as u64).write(writer)?;
-               for tx in self.last_local_commitment_txn.iter() {
-                       if let Err(e) = tx.consensus_encode(&mut WriterWriteAdaptor(writer)) {
-                               match e {
-                                       encode::Error::Io(e) => return Err(e),
-                                       _ => panic!("last_local_commitment_txn must have been well-formed!"),
-                               }
-                       }
-               }
-
                match self.last_sent_closing_fee {
                        Some((feerate, fee, sig)) => {
                                1u8.write(writer)?;
@@ -4041,15 +4010,6 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                let channel_update_count = Readable::read(reader)?;
                let feerate_per_kw = Readable::read(reader)?;
 
-               let last_local_commitment_txn_count: u64 = Readable::read(reader)?;
-               let mut last_local_commitment_txn = Vec::with_capacity(cmp::min(last_local_commitment_txn_count as usize, OUR_MAX_HTLCS as usize*2 + 1));
-               for _ in 0..last_local_commitment_txn_count {
-                       last_local_commitment_txn.push(match Transaction::consensus_decode(reader.by_ref()) {
-                               Ok(tx) => tx,
-                               Err(_) => return Err(DecodeError::InvalidValue),
-                       });
-               }
-
                let last_sent_closing_fee = match <u8 as Readable<R>>::read(reader)? {
                        0 => None,
                        1 => Some((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)),
@@ -4132,8 +4092,6 @@ impl<R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs<R,
                        #[cfg(debug_assertions)]
                        max_commitment_tx_output_remote: ::std::sync::Mutex::new((0, 0)),
 
-                       last_local_commitment_txn,
-
                        last_sent_closing_fee,
 
                        funding_tx_confirmed_in,
lightning/src/ln/channelmanager.rs
index 2773c1758e559fb41f86ba5799f401d2c405d529..d3b4ab96dc8beb7ed314d2045067509a64685c1b 100644 (file)
@@ -1791,7 +1791,7 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
                        let pending_msg_events = channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                if channel.is_awaiting_monitor_update() {
-                                       let chan_monitor = channel.channel_monitor();
+                                       let chan_monitor = channel.channel_monitor().clone();
                                        if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                                match e {
                                                        ChannelMonitorUpdateErr::PermanentFailure => {
lightning/src/ln/channelmonitor.rs
index ea84b984ed7bb122e540c99fb102c7d62498bba9..bde08bc514a816d5f52848772d9524528b87e0e6 100644 (file)
@@ -2316,6 +2316,7 @@ impl ChannelMonitor {
        /// out-of-band the other node operator to coordinate with him if option is available to you.
        /// In any-case, choice is up to the user.
        pub fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+               log_trace!(self, "Getting signed latest local commitment transaction!");
                if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
                        let mut res = vec![local_tx.tx.clone()];
                        match self.key_storage {
lightning/src/ln/functional_tests.rs
index 4e5d726392186e8a18c9cf452f5ae8d61d316558..4f6bf3a60ec1f01de8316be52fa36ffee359b64d 100644 (file)
@@ -426,9 +426,9 @@ fn test_update_fee_that_funder_cannot_afford() {
                let chan = chan_lock.by_id.get(&channel_id).unwrap();
 
                //We made sure neither party's funds are below the dust limit so -2 non-HTLC txns from number of outputs
-               let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
+               let num_htlcs = chan.channel_monitor().get_latest_local_commitment_txn()[0].output.len() - 2;
                let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
-               let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
+               let mut actual_fee = chan.channel_monitor().get_latest_local_commitment_txn()[0].output.iter().fold(0, |acc, output| acc + output.value);
                actual_fee = channel_value - actual_fee;
                assert_eq!(total_fee, actual_fee);
        } //drop the mutex
@@ -1267,7 +1267,7 @@ fn test_duplicate_htlc_different_direction_onchain() {
        check_added_monitors!(nodes[0], 1);
 
        // Broadcast node 1 commitment txn
-       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
        let mut has_both_htlcs = 0; // check htlcs match ones committed
@@ -1286,7 +1286,11 @@ fn test_duplicate_htlc_different_direction_onchain() {
        // Check we only broadcast 1 timeout tx
        let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
        let htlc_pair = if claim_txn[0].output[0].value == 800_000 / 1000 { (claim_txn[0].clone(), claim_txn[1].clone()) } else { (claim_txn[1].clone(), claim_txn[0].clone()) };
-       assert_eq!(claim_txn.len(), 6);
+       assert_eq!(claim_txn.len(), 7);
+       check_spends!(claim_txn[2], chan_1.3);
+       check_spends!(claim_txn[3], claim_txn[2]);
+       assert_eq!(claim_txn[0], claim_txn[5]);
+       assert_eq!(claim_txn[1], claim_txn[6]);
        assert_eq!(htlc_pair.0.input.len(), 1);
        assert_eq!(htlc_pair.0.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
        check_spends!(htlc_pair.0, remote_txn[0].clone());
@@ -1874,7 +1878,7 @@ fn test_justice_tx() {
        // A pending HTLC which will be revoked:
        let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        // Get the will-be-revoked local txn from nodes[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
@@ -1922,7 +1926,7 @@ fn test_justice_tx() {
        // A pending HTLC which will be revoked:
        let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
        // Get the will-be-revoked local txn from B
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
@@ -1961,7 +1965,7 @@ fn revoked_output_claim() {
        let nodes = create_network(2, &[None, None]);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
        // node[0] is gonna to revoke an old state thus node[1] should be able to claim the revoked output
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn.len(), 1);
        // Only output is the full channel value back to nodes[0]:
        assert_eq!(revoked_local_txn[0].output.len(), 1);
@@ -1999,7 +2003,7 @@ fn claim_htlc_outputs_shared_tx() {
        let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 
        // Get the will-be-revoked local txn from node[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
@@ -2074,7 +2078,7 @@ fn claim_htlc_outputs_single_tx() {
        let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
 
        // Get the will-be-revoked local txn from node[0]
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        //Revoke the old state
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1, 3_000_000);
@@ -2176,7 +2180,7 @@ fn test_htlc_on_chain_success() {
 
        // Broadcast legit commitment tx from C on B's chain
        // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(commitment_tx.len(), 1);
        check_spends!(commitment_tx[0], chan_2.3.clone());
        nodes[2].node.claim_funds(our_payment_preimage, 3_000_000);
@@ -2190,10 +2194,12 @@ fn test_htlc_on_chain_success() {
 
        nodes[2].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[2]);
-       let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
-       assert_eq!(node_txn.len(), 5);
+       let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
+       assert_eq!(node_txn.len(), 7);
        assert_eq!(node_txn[0], node_txn[3]);
        assert_eq!(node_txn[1], node_txn[4]);
+       assert_eq!(node_txn[0], node_txn[5]);
+       assert_eq!(node_txn[1], node_txn[6]);
        assert_eq!(node_txn[2], commitment_tx[0]);
        check_spends!(node_txn[0], commitment_tx[0].clone());
        check_spends!(node_txn[1], commitment_tx[0].clone());
@@ -2271,13 +2277,13 @@ fn test_htlc_on_chain_success() {
 
        // Broadcast legit commitment tx from A on B's chain
        // Broadcast preimage tx by B on offered output from A commitment tx  on A's chain
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        check_spends!(commitment_tx[0], chan_1.3.clone());
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        check_closed_broadcast!(nodes[1]);
-       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan)
-       assert_eq!(node_txn.len(), 3);
-       assert_eq!(node_txn[0], node_txn[2]);
+       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + 2*HTLC-Success), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan)
+       assert_eq!(node_txn.len(), 5);
+       assert_eq!(node_txn[0], node_txn[4]);
        check_spends!(node_txn[0], commitment_tx[0].clone());
        assert_eq!(node_txn[0].input.len(), 2);
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
@@ -2286,6 +2292,8 @@ fn test_htlc_on_chain_success() {
        assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
        check_spends!(node_txn[1], chan_1.3.clone());
        assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
+       check_spends!(node_txn[2], node_txn[1]);
+       check_spends!(node_txn[3], node_txn[1]);
        // We don't bother to check that B can claim the HTLC output on its commitment tx here as
        // we already checked the same situation with A.
 
@@ -2335,7 +2343,7 @@ fn test_htlc_on_chain_timeout() {
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 
        // Broadcast legit commitment tx from C on B's chain
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        check_spends!(commitment_tx[0], chan_2.3.clone());
        nodes[2].node.fail_htlc_backwards(&payment_hash);
        check_added_monitors!(nodes[2], 0);
@@ -2408,7 +2416,7 @@ fn test_htlc_on_chain_timeout() {
        assert_eq!(node_txn.len(), 0);
 
        // Broadcast legit commitment tx from B on A's chain
-       let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        check_spends!(commitment_tx[0], chan_1.3.clone());
 
        nodes[0].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
@@ -2437,7 +2445,7 @@ fn test_simple_commitment_revoked_fail_backward() {
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
        // Get the will-be-revoked local txn from nodes[2]
-       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, 3_000_000);
 
@@ -2505,7 +2513,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
        // Get the will-be-revoked local txn from nodes[2]
-       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
        // Revoke the old state
        claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage, if no_to_remote { 10_000 } else { 3_000_000});
@@ -3733,7 +3741,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, LocalFeatures::new(), LocalFeatures::new());
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
@@ -3760,7 +3768,7 @@ fn test_static_spendable_outputs_preimage_tx() {
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(commitment_tx[0].input.len(), 1);
        assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -3780,11 +3788,13 @@ fn test_static_spendable_outputs_preimage_tx() {
        }
 
        // Check B's monitor was able to send back output descriptor event for preimage tx on A's commitment tx
-       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 1 (local commitment tx), ChannelMonitor: 2 (1 preimage tx) * 2 (block-rescan)
+       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (local commitment tx + HTLC-Success), ChannelMonitor: 2 (1 preimage tx)
+       assert_eq!(node_txn.len(), 4);
        check_spends!(node_txn[0], commitment_tx[0].clone());
-       assert_eq!(node_txn[0], node_txn[2]);
+       assert_eq!(node_txn[0], node_txn[3]);
        assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
        check_spends!(node_txn[1], chan_1.3.clone());
+       check_spends!(node_txn[2], node_txn[1]);
 
        let spend_txn = check_spendable_outputs!(nodes[1], 1); // , 0, 0, 1, 1);
        assert_eq!(spend_txn.len(), 2);
@@ -3800,7 +3810,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -3830,7 +3840,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -3874,7 +3884,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
 
@@ -3933,7 +3943,7 @@ fn test_onchain_to_onchain_claim() {
 
        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
-       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        check_spends!(commitment_tx[0], chan_2.3.clone());
        nodes[2].node.claim_funds(payment_preimage, 3_000_000);
        check_added_monitors!(nodes[2], 1);
@@ -3947,8 +3957,9 @@ fn test_onchain_to_onchain_claim() {
        check_closed_broadcast!(nodes[2]);
 
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
-       assert_eq!(c_txn.len(), 3);
+       assert_eq!(c_txn.len(), 4);
        assert_eq!(c_txn[0], c_txn[2]);
+       assert_eq!(c_txn[0], c_txn[3]);
        assert_eq!(commitment_tx[0], c_txn[1]);
        check_spends!(c_txn[1], chan_2.3.clone());
        check_spends!(c_txn[2], c_txn[1].clone());
@@ -3991,12 +4002,13 @@ fn test_onchain_to_onchain_claim() {
                _ => panic!("Unexpected event"),
        };
        // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
-       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
        let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-       assert_eq!(b_txn.len(), 3);
+       assert_eq!(b_txn.len(), 4);
        check_spends!(b_txn[1], chan_1.3); // Local commitment tx, issued by ChannelManager
-       assert_eq!(b_txn[0], b_txn[2]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan
+       check_spends!(b_txn[2], b_txn[1]); // HTLC-Success tx, as a part of the local txn rebroadcast by ChannelManager in the force close
+       assert_eq!(b_txn[0], b_txn[3]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan
        check_spends!(b_txn[0], commitment_tx[0].clone());
        assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
        assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
@@ -4018,7 +4030,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        *nodes[0].network_payment_count.borrow_mut() -= 1;
        assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash);
 
-       let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+       let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(commitment_txn[0].input.len(), 1);
        check_spends!(commitment_txn[0], chan_2.3.clone());
 
@@ -4056,9 +4068,11 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
                _ => panic!("Unexepected event"),
        }
        let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-       assert_eq!(htlc_success_txn.len(), 5);
+       assert_eq!(htlc_success_txn.len(), 7);
        check_spends!(htlc_success_txn[2], chan_2.3.clone());
-       assert_eq!(htlc_success_txn[0], htlc_success_txn[3]);
+       check_spends!(htlc_success_txn[3], htlc_success_txn[2]);
+       check_spends!(htlc_success_txn[4], htlc_success_txn[2]);
+       assert_eq!(htlc_success_txn[0], htlc_success_txn[5]);
        assert_eq!(htlc_success_txn[0].input.len(), 1);
        assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
        assert_eq!(htlc_success_txn[1], htlc_success_txn[4]);
@@ -4129,7 +4143,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
-       let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3.clone());
 
@@ -4183,7 +4197,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Rebalance and check output sanity...
        send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000, 500_000);
        send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000, 500_000);
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 2);
+       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn()[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
        // 0th HTLC:
@@ -4220,8 +4234,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        // Double-check that six of the new HTLC were added
        // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
        // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.len(), 1);
-       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 8);
+       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn().len(), 1);
+       assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn()[0].output.len(), 8);
 
        // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
        // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
@@ -4252,7 +4266,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]).unwrap();
        commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
 
-       let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        expect_pending_htlcs_forwardable!(nodes[3]);
        check_added_monitors!(nodes[3], 1);
@@ -4280,7 +4294,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        //
        // Alternatively, we may broadcast the previous commitment transaction, which should only
        // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
-       let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        if announce_latest {
@@ -4417,7 +4431,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
 
        route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
-       let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+       let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(local_txn[0].input.len(), 1);
        check_spends!(local_txn[0], chan_1.3.clone());
 
@@ -5702,7 +5716,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
        // Cache one local commitment tx as previous
-       let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        // Fail one HTLC to prune it in the will-be-latest-local commitment tx
        assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
@@ -5716,7 +5730,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        check_added_monitors!(nodes[0], 1);
 
        // Cache one local commitment tx as lastest
-       let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        match events[0] {
@@ -5843,8 +5857,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let (_payment_preimage_1, dust_hash) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        let (_payment_preimage_2, non_dust_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
 
-       let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
-       let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
 
        // We revoked bs_commitment_tx
        if revoked {
@@ -6342,7 +6356,7 @@ fn test_bump_penalty_txn_on_revoked_commitment() {
        let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 3000000, 30).unwrap();
        send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
 
-       let revoked_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
        assert_eq!(revoked_txn[0].output.len(), 4);
        assert_eq!(revoked_txn[0].input.len(), 1);
@@ -6442,7 +6456,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3_000_000).0;
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
 
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
 
@@ -6586,7 +6600,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
 
        // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
-       let remote_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let remote_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(remote_txn[0].output.len(), 4);
        assert_eq!(remote_txn[0].input.len(), 1);
        assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
@@ -6604,13 +6618,16 @@ fn test_bump_penalty_txn_on_remote_commitment() {
        let feerate_preimage;
        {
                let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 6); // 2 * claim tx (broadcasted from ChannelMonitor) * 2 (block-reparsing) + local commitment tx + local HTLC-timeout (broadcasted from ChannelManager)
-               assert_eq!(node_txn[0], node_txn[4]);
-               assert_eq!(node_txn[1], node_txn[5]);
+               assert_eq!(node_txn.len(), 7); // 2 * claim tx (broadcasted from ChannelMonitor) * 2 (block-reparsing) + local commitment tx + local HTLC-timeout + HTLC-success (broadcasted from ChannelManager)
+               assert_eq!(node_txn[0], node_txn[5]);
+               assert_eq!(node_txn[1], node_txn[6]);
                assert_eq!(node_txn[0].input.len(), 1);
                assert_eq!(node_txn[1].input.len(), 1);
                check_spends!(node_txn[0], remote_txn[0].clone());
                check_spends!(node_txn[1], remote_txn[0].clone());
+               check_spends!(node_txn[2], chan.3);
+               check_spends!(node_txn[3], node_txn[2]);
+               check_spends!(node_txn[4], node_txn[2]);
                if node_txn[0].input[0].witness.last().unwrap().len() == ACCEPTED_HTLC_SCRIPT_WEIGHT {
                        timeout = node_txn[0].txid();
                        let index = node_txn[0].input[0].previous_output.vout;
@@ -6690,10 +6707,13 @@ fn test_set_outpoints_partial_claiming() {
        let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
 
        // Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
-       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+       assert_eq!(remote_txn.len(), 3);
        assert_eq!(remote_txn[0].output.len(), 4);
        assert_eq!(remote_txn[0].input.len(), 1);
        assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
+       check_spends!(remote_txn[1], remote_txn[0].clone());
+       check_spends!(remote_txn[2], remote_txn[0].clone());
 
        // Connect blocks on node A to advance height towards TEST_FINAL_CLTV
        let prev_header_100 = connect_blocks(&nodes[1].block_notifier, 100, 0, false, Default::default());
@@ -6710,8 +6730,12 @@ fn test_set_outpoints_partial_claiming() {
        // Verify node A broadcast tx claiming both HTLCs
        {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 3);
+               assert_eq!(node_txn.len(), 5);
+               assert_eq!(node_txn[0], node_txn[4]);
                check_spends!(node_txn[0], remote_txn[0].clone());
+               check_spends!(node_txn[1], chan.3.clone());
+               check_spends!(node_txn[2], node_txn[1]);
+               check_spends!(node_txn[3], node_txn[1]);
                assert_eq!(node_txn[0].input.len(), 2);
                node_txn.clear();
        }
@@ -6775,7 +6799,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
        let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
        route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
 
-       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+       let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
        assert_eq!(revoked_local_txn[0].input.len(), 1);
        assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());