Always process `ChannelMonitorUpdate`s asynchronously
Author:     Matt Corallo <git@bluematt.me>
AuthorDate: Wed, 11 Jan 2023 21:37:57 +0000
Commit:     Matt Corallo <git@bluematt.me>
CommitDate: Wed, 22 Feb 2023 17:34:46 +0000
We currently have two codepaths on most channel update functions -
most methods return a set of messages to send a peer iff the
`ChannelMonitorUpdate` succeeds, but if it does not we push the
messages back into the `Channel` and then pull them back out when
the `ChannelMonitorUpdate` completes, sending them only then. This
adds a substantial amount of complexity to some of our most
critical codepaths.
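
For concreteness, the old shape looked roughly like the following
(a simplified sketch with stub types, mirroring the pre-change
`commitment_signed` signature visible in the channel.rs diff below,
not the actual LDK definitions):

    // Stub types standing in for the real LDK message and monitor types.
    struct RevokeAndACK;
    struct CommitmentSigned;
    struct ChannelMonitorUpdate { update_id: u64 }
    struct ChannelError(String);

    // On success the peer messages come back alongside the monitor
    // update; on failure the caller may still receive a monitor update
    // and must later pull the messages back out of the `Channel`.
    type OldCommitmentSignedResult = Result<
        (RevokeAndACK, Option<CommitmentSigned>, ChannelMonitorUpdate),
        (Option<ChannelMonitorUpdate>, ChannelError),
    >;

    fn main() {
        let res: OldCommitmentSignedResult =
            Ok((RevokeAndACK, None, ChannelMonitorUpdate { update_id: 1 }));
        // Every call site had to branch on both the Result and the
        // persistence status, duplicating logic across critical paths.
        assert!(res.is_ok());
    }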

Instead, here we swap all our channel update codepaths to
immediately set the channel-update-required flag and only return a
`ChannelMonitorUpdate` to the `ChannelManager`. Internally in the
`Channel` we store a queue of `ChannelMonitorUpdate`s, which will
become critical in future work to surface pending
`ChannelMonitorUpdate`s to users at startup so that they can be
completed.
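
A minimal sketch of the new pattern, assuming heavily simplified
types (the real fields and helpers in channel.rs differ):

    // The `Channel` queues every `ChannelMonitorUpdate` and hands
    // callers a reference into that queue; hypothetical stub types.
    struct ChannelMonitorUpdate { update_id: u64 }

    struct Channel {
        latest_monitor_update_id: u64,
        pending_monitor_updates: Vec<ChannelMonitorUpdate>,
    }

    impl Channel {
        // Every update codepath now ends the same way: bump the id,
        // queue the update, and return only a borrowed reference.
        fn queue_monitor_update(&mut self) -> &ChannelMonitorUpdate {
            self.latest_monitor_update_id += 1;
            self.pending_monitor_updates.push(ChannelMonitorUpdate {
                update_id: self.latest_monitor_update_id,
            });
            self.pending_monitor_updates.last().unwrap()
        }

        // Mirrors `monitor_updating_restored`: once persistence
        // completes, the queue is cleared and any held messages are
        // regenerated.
        fn monitor_updating_restored(&mut self) {
            self.pending_monitor_updates.clear();
        }
    }

    fn main() {
        let mut chan = Channel {
            latest_monitor_update_id: 0,
            pending_monitor_updates: Vec::new(),
        };
        assert_eq!(chan.queue_monitor_update().update_id, 1);
        chan.monitor_updating_restored();
        assert!(chan.pending_monitor_updates.is_empty());
    }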

This leaves some redundant work in `Channel` to be cleaned up
later. Specifically, we still generate the messages, which we now
ignore and simply regenerate later.

This commit updates the `ChannelMonitorUpdate` pipeline across all
the places we generate them.

lightning/src/chain/chainmonitor.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs

lightning/src/chain/chainmonitor.rs
index 3a2077209c4ea36ebf3b412a048f845e1b344c1f..9a74f891b1e985b371f2640ccf23f7c992e626ec 100644
@@ -796,7 +796,7 @@ mod tests {
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::ChannelMessageHandler;
        use crate::util::errors::APIError;
-       use crate::util::events::{ClosureReason, MessageSendEvent, MessageSendEventsProvider};
+       use crate::util::events::{Event, ClosureReason, MessageSendEvent, MessageSendEventsProvider};
 
        #[test]
        fn test_async_ooo_offchain_updates() {
@@ -819,10 +819,8 @@ mod tests {
 
                nodes[1].node.claim_funds(payment_preimage_1);
                check_added_monitors!(nodes[1], 1);
-               expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
                nodes[1].node.claim_funds(payment_preimage_2);
                check_added_monitors!(nodes[1], 1);
-               expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
 
                let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
                assert_eq!(persistences.len(), 1);
@@ -850,8 +848,24 @@ mod tests {
                        .find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
                assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
                nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
 
+               let claim_events = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(claim_events.len(), 2);
+               match claim_events[0] {
+                       Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                               assert_eq!(payment_hash_1, *payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match claim_events[1] {
+                       Event::PaymentClaimed { ref payment_hash, amount_msat: 1_000_000, .. } => {
+                               assert_eq!(payment_hash_2, *payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
                // Now manually walk the commitment signed dance - because we claimed two payments
                // back-to-back it doesn't fit into the neat walk commitment_signed_dance does.
 
lightning/src/ln/chanmon_update_fail_tests.rs
index fdb66cc4dc8224d6c95b4abd57bdeaf24983509a..b1d43ca07a0d3ce6b62ba3d7e985db13758574f7 100644
@@ -143,7 +143,7 @@ fn test_monitor_and_persister_update_fail() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Check that even though the persister is returning an InProgress,
                        // because the update is bogus, ultimately the error that's returned
                        // should be a PermanentFailure.
@@ -1602,7 +1602,6 @@ fn test_monitor_update_fail_claim() {
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
 
@@ -1628,6 +1627,7 @@ fn test_monitor_update_fail_claim() {
        let events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
        commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+       expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 
        let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
        nodes[2].node.send_payment(&route, payment_hash_3, &Some(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
@@ -1645,6 +1645,7 @@ fn test_monitor_update_fail_claim() {
        let channel_id = chan_1.2;
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 0);
 
        let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -1653,7 +1654,7 @@ fn test_monitor_update_fail_claim() {
        expect_payment_sent!(nodes[0], payment_preimage_1);
 
        // Get the payment forwards, note that they were batched into one commitment update.
-       expect_pending_htlcs_forwardable!(nodes[1]);
+       nodes[1].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[1], 1);
        let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
@@ -1739,7 +1740,6 @@ fn test_monitor_update_on_pending_forwards() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
        check_added_monitors!(nodes[1], 1);
-       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1753,17 +1753,17 @@ fn test_monitor_update_on_pending_forwards() {
 
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 3);
-       if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[0] {
+       if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
                assert_eq!(payment_hash, payment_hash_1);
                assert!(payment_failed_permanently);
        } else { panic!("Unexpected event!"); }
-       match events[1] {
+       match events[2] {
                Event::PaymentFailed { payment_hash, .. } => {
                        assert_eq!(payment_hash, payment_hash_1);
                },
                _ => panic!("Unexpected event"),
        }
-       match events[2] {
+       match events[0] {
                Event::PendingHTLCsForwardable { .. } => { },
                _ => panic!("Unexpected event"),
        };
@@ -1803,7 +1803,6 @@ fn monitor_update_claim_fail_no_response() {
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.claim_funds(payment_preimage_1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 1);
 
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -1811,6 +1810,7 @@ fn monitor_update_claim_fail_no_response() {
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -2290,7 +2290,6 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.claim_funds(payment_preimage_0);
        check_added_monitors!(nodes[0], 1);
-       expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
@@ -2353,6 +2352,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
+       expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
        // New outbound messages should be generated immediately upon a call to
        // get_and_clear_pending_msg_events (but not before).
@@ -2606,7 +2606,15 @@ fn test_permanent_error_during_sending_shutdown() {
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
-       check_closed_broadcast!(nodes[0], true);
+
+       // We always send the `shutdown` message when initiating a shutdown, even if we immediately
+       // close the channel thereafter.
+       let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 3);
+       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+       if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2629,7 +2637,15 @@ fn test_permanent_error_during_handling_shutdown() {
        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
-       check_closed_broadcast!(nodes[1], true);
+
+       // We always send the `shutdown` response when receiving a shutdown, even if we immediately
+       // close the channel thereafter.
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(msg_events.len(), 3);
+       if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+       if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+       if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
        check_added_monitors!(nodes[1], 2);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
@@ -2651,7 +2667,6 @@ fn double_temp_error() {
        // `claim_funds` results in a ChannelMonitorUpdate.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
-       expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
 
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -2659,7 +2674,6 @@ fn double_temp_error() {
        // which had some asserts that prevented it from being called twice.
        nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
-       expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -2668,11 +2682,24 @@ fn double_temp_error() {
        check_added_monitors!(nodes[1], 0);
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
 
-       // Complete the first HTLC.
-       let events = nodes[1].node.get_and_clear_pending_msg_events();
-       assert_eq!(events.len(), 1);
+       // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
+       // and get both PaymentClaimed events at once.
+       let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+
+       let events = nodes[1].node.get_and_clear_pending_events();
+       assert_eq!(events.len(), 2);
+       match events[0] {
+               Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+               _ => panic!("Unexpected Event: {:?}", events[0]),
+       }
+       match events[1] {
+               Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+               _ => panic!("Unexpected Event: {:?}", events[1]),
+       }
+
+       assert_eq!(msg_events.len(), 1);
        let (update_fulfill_1, commitment_signed_b1, node_id) = {
-               match &events[0] {
+               match &msg_events[0] {
                        &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                                assert!(update_add_htlcs.is_empty());
                                assert_eq!(update_fulfill_htlcs.len(), 1);
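
The channel.rs changes below repeatedly apply one shape: queue the
`ChannelMonitorUpdate` on the `Channel` and hand callers a borrowed
reference. A simplified sketch of that shape, with hypothetical
stand-ins for the real types:

    // Hypothetical, heavily simplified; the real enum carries more
    // context and the real method does the actual claim work.
    struct ChannelMonitorUpdate { update_id: u64 }

    enum UpdateFulfillCommitFetch<'a> {
        NewClaim { monitor_update: &'a ChannelMonitorUpdate, htlc_value_msat: u64 },
        DuplicateClaim,
    }

    struct Channel { pending_monitor_updates: Vec<ChannelMonitorUpdate> }

    impl Channel {
        fn get_update_fulfill_htlc_and_commit(&mut self, htlc_value_msat: u64)
        -> UpdateFulfillCommitFetch<'_> {
            let update_id = self.pending_monitor_updates.len() as u64 + 1;
            self.pending_monitor_updates.push(ChannelMonitorUpdate { update_id });
            UpdateFulfillCommitFetch::NewClaim {
                monitor_update: self.pending_monitor_updates.last().unwrap(),
                htlc_value_msat,
            }
        }
    }

    fn main() {
        let mut chan = Channel { pending_monitor_updates: Vec::new() };
        match chan.get_update_fulfill_htlc_and_commit(1_000_000) {
            UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } => {
                assert_eq!(monitor_update.update_id, 1);
                assert_eq!(htlc_value_msat, 1_000_000);
            }
            UpdateFulfillCommitFetch::DuplicateClaim => {}
        }
    }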
lightning/src/ln/channel.rs
index 896c475049b4df56673dfa5e2c3ca7d45760e448..53e8201ff5d190a22d6730e086dce2934db3a3cc 100644
@@ -393,35 +393,21 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch {
+pub enum UpdateFulfillCommitFetch<'a> {
        /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
        /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
        /// previously placed in the holding cell (and has since been removed).
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-               monitor_update: ChannelMonitorUpdate,
+               monitor_update: &'a ChannelMonitorUpdate,
                /// The value of the HTLC which was claimed, in msat.
                htlc_value_msat: u64,
-               /// The update_fulfill message and commitment_signed message (if the claim was not placed
-               /// in the holding cell).
-               msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
        },
        /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
        /// or has been forgotten (presumably previously claimed).
        DuplicateClaim {},
 }
 
-/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
-/// state.
-pub(super) struct RAAUpdates {
-       pub commitment_update: Option<msgs::CommitmentUpdate>,
-       pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
-       pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
-       pub finalized_claimed_htlcs: Vec<HTLCSource>,
-       pub monitor_update: ChannelMonitorUpdate,
-       pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
-}
-
 /// The return value of `monitor_updating_restored`
 pub(super) struct MonitorRestoreUpdates {
        pub raa: Option<msgs::RevokeAndACK>,
@@ -1979,22 +1965,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
        }
 
-       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
-                               let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
-                                       Err(e) => return Err((e, monitor_update)),
-                                       Ok(res) => res
-                               };
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
-                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
+                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               UpdateFulfillCommitFetch::NewClaim {
+                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
+                                       htlc_value_msat,
+                               }
                        },
-                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
-                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
-                       UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
+                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+                               self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               UpdateFulfillCommitFetch::NewClaim {
+                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
+                                       htlc_value_msat,
+                               }
+                       }
+                       UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
                }
        }
 
@@ -3049,17 +3043,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(())
        }
 
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
                where L::Target: Logger
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
+                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
+                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
+                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let funding_script = self.get_funding_redeemscript();
@@ -3077,7 +3071,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
                                log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
                        if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
-                               return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
+                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
                        bitcoin_tx.txid
                };
@@ -3092,7 +3086,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        debug_assert!(!self.is_outbound());
                        let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
                        if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-                               return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
+                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
                        }
                }
                #[cfg(any(test, fuzzing))]
@@ -3114,7 +3108,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
 
                if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-                       return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))));
+                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
                }
 
                // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
@@ -3132,7 +3126,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
                                        encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
                                if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
-                                       return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
+                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
                                htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
                        } else {
@@ -3148,10 +3142,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.counterparty_funding_pubkey()
                );
 
-               let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
                self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
-                       .map_err(|_| (None, ChannelError::Close("Failed to validate our commitment".to_owned())))?;
-               let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
+                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
 
                // Update state now that we've passed all the can-fail calls...
                let mut need_commitment = false;
@@ -3196,7 +3188,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                self.cur_holder_commitment_transaction_number -= 1;
                // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
-               // send_commitment_no_status_check() next which will reset this to RAAFirst.
+               // build_commitment_no_status_check() next which will reset this to RAAFirst.
                self.resend_order = RAACommitmentOrder::CommitmentFirst;
 
                if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -3208,52 +3200,50 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // the corresponding HTLC status updates so that get_last_commitment_update
                                // includes the right HTLCs.
                                self.monitor_pending_commitment_signed = true;
-                               let (_, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
                        log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
                                log_bytes!(self.channel_id));
-                       return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
+                       self.pending_monitor_updates.push(monitor_update);
+                       return Ok(self.pending_monitor_updates.last().unwrap());
                }
 
-               let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack when we
                        // free_holding_cell_htlcs().
-                       let (msg, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-                       // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                        // strictly increasing by one, so decrement it here.
                        self.latest_monitor_update_id = monitor_update.update_id;
                        monitor_update.updates.append(&mut additional_update.updates);
-                       Some(msg)
-               } else { None };
+                       true
+               } else { false };
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
-
-               Ok((msgs::RevokeAndACK {
-                       channel_id: self.channel_id,
-                       per_commitment_secret,
-                       next_per_commitment_point,
-               }, commitment_signed, monitor_update))
+                       log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+               self.pending_monitor_updates.push(monitor_update);
+               self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+               return Ok(self.pending_monitor_updates.last().unwrap());
        }
 
        /// Public version of the below, checking relevant preconditions first.
        /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
        /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                if self.channel_state >= ChannelState::ChannelReady as u32 &&
                   (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(logger)
-               } else { Ok((None, Vec::new())) }
+               } else { (None, Vec::new()) }
        }
 
        /// Frees any pending commitment updates in the holding cell, generating the relevant messages
        /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
@@ -3334,7 +3324,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }
                        }
                        if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
-                               return Ok((None, htlcs_to_fail));
+                               return (None, htlcs_to_fail);
                        }
                        let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
                                self.send_update_fee(feerate, false, logger)
@@ -3342,8 +3332,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                None
                        };
 
-                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-                       // send_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
                        // but we want them to be strictly increasing by one, so reset it here.
                        self.latest_monitor_update_id = monitor_update.update_id;
                        monitor_update.updates.append(&mut additional_update.updates);
@@ -3352,16 +3342,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 
-                       Ok((Some((msgs::CommitmentUpdate {
-                               update_add_htlcs,
-                               update_fulfill_htlcs,
-                               update_fail_htlcs,
-                               update_fail_malformed_htlcs: Vec::new(),
-                               update_fee,
-                               commitment_signed,
-                       }, monitor_update)), htlcs_to_fail))
+                       self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
                } else {
-                       Ok((None, Vec::new()))
+                       (None, Vec::new())
                }
        }
 
@@ -3370,7 +3355,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
+       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
                where L::Target: Logger,
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3557,8 +3542,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // When the monitor updating is restored we'll call get_last_commitment_update(),
                                // which does not update state, but we're definitely now awaiting a remote revoke
                                // before we can step forward any more, so set it here.
-                               let (_, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-                               // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
@@ -3567,71 +3552,41 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
-                       return Ok(RAAUpdates {
-                               commitment_update: None, finalized_claimed_htlcs: Vec::new(),
-                               accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
-                               monitor_update,
-                               holding_cell_failed_htlcs: Vec::new()
-                       });
+                       self.pending_monitor_updates.push(monitor_update);
+                       return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
                }
 
-               match self.free_holding_cell_htlcs(logger)? {
-                       (Some((mut commitment_update, mut additional_update)), htlcs_to_fail) => {
-                               commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
-                               for fail_msg in update_fail_htlcs.drain(..) {
-                                       commitment_update.update_fail_htlcs.push(fail_msg);
-                               }
-                               commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
-                               for fail_msg in update_fail_malformed_htlcs.drain(..) {
-                                       commitment_update.update_fail_malformed_htlcs.push(fail_msg);
-                               }
-
+               match self.free_holding_cell_htlcs(logger) {
+                       (Some(_), htlcs_to_fail) => {
+                               let mut additional_update = self.pending_monitor_updates.pop().unwrap();
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
 
-                               Ok(RAAUpdates {
-                                       commitment_update: Some(commitment_update),
-                                       finalized_claimed_htlcs,
-                                       accepted_htlcs: to_forward_infos,
-                                       failed_htlcs: revoked_htlcs,
-                                       monitor_update,
-                                       holding_cell_failed_htlcs: htlcs_to_fail
-                               })
+                               self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                               self.pending_monitor_updates.push(monitor_update);
+                               Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                        },
                        (None, htlcs_to_fail) => {
                                if require_commitment {
-                                       let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+                                       let mut additional_update = self.build_commitment_no_status_check(logger);
 
-                                       // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                        // strictly increasing by one, so decrement it here.
                                        self.latest_monitor_update_id = monitor_update.update_id;
                                        monitor_update.updates.append(&mut additional_update.updates);
 
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
                                                log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
-                                       Ok(RAAUpdates {
-                                               commitment_update: Some(msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs,
-                                                       update_fail_malformed_htlcs,
-                                                       update_fee: None,
-                                                       commitment_signed
-                                               }),
-                                               finalized_claimed_htlcs,
-                                               accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-                                               monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-                                       })
+                                       self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       self.pending_monitor_updates.push(monitor_update);
+                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                                } else {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
-                                       Ok(RAAUpdates {
-                                               commitment_update: None,
-                                               finalized_claimed_htlcs,
-                                               accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-                                               monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-                                       })
+                                       self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       self.pending_monitor_updates.push(monitor_update);
+                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
                                }
                        }
                }
@@ -3818,6 +3773,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+               self.pending_monitor_updates.clear();
 
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -4295,7 +4251,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn shutdown<SP: Deref>(
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-       ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+       ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        where SP::Target: SignerProvider
        {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4351,12 +4307,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let monitor_update = if update_shutdown_script {
                        self.latest_monitor_update_id += 1;
-                       Some(ChannelMonitorUpdate {
+                       let monitor_update = ChannelMonitorUpdate {
                                update_id: self.latest_monitor_update_id,
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
-                       })
+                       };
+                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       Some(self.pending_monitor_updates.last().unwrap())
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
@@ -5810,15 +5769,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(Some(res))
        }
 
-       /// Only fails in case of signer rejection.
-       fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
-               let monitor_update = self.build_commitment_no_status_check(logger);
-               match self.send_commitment_no_state_update(logger) {
-                       Ok((commitment_signed, _)) => Ok((commitment_signed, monitor_update)),
-                       Err(e) => Err(e),
-               }
-       }
-
        fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
                log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
                // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
@@ -5944,16 +5894,20 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
        }
 
-       /// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
-       /// to send to the remote peer in one go.
+       /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+       /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
        ///
        /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
-       /// [`Self::send_htlc`] and [`Self::send_commitment_no_state_update`] for more info.
-       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
-               match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger)? {
-                       Some(update_add_htlc) => {
-                               let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
-                               Ok(Some((update_add_htlc, commitment_signed, monitor_update)))
+       /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
+       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+               if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+               match send_res? {
+                       Some(_) => {
+                               let monitor_update = self.build_commitment_no_status_check(logger);
+                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                               self.pending_monitor_updates.push(monitor_update);
+                               Ok(Some(self.pending_monitor_updates.last().unwrap()))
                        },
                        None => Ok(None)
                }
@@ -5979,8 +5933,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        /// Begins the shutdown process, getting a message for the remote peer and returning all
        /// holding cell HTLCs for payment failure.
-       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
-       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       ///
+       /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
+       /// [`ChannelMonitorUpdate`] will be returned.
+       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+               target_feerate_sats_per_kw: Option<u32>)
+       -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        where SP::Target: SignerProvider {
                for htlc in self.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -6023,12 +5981,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let monitor_update = if update_shutdown_script {
                        self.latest_monitor_update_id += 1;
-                       Some(ChannelMonitorUpdate {
+                       let monitor_update = ChannelMonitorUpdate {
                                update_id: self.latest_monitor_update_id,
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
-                       })
+                       };
+                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                       self.pending_monitor_updates.push(monitor_update);
+                       Some(self.pending_monitor_updates.last().unwrap())
                } else { None };
                let shutdown = msgs::Shutdown {
                        channel_id: self.channel_id,
@@ -6049,6 +6010,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                });
 
+               debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+                       "we can't both complete shutdown and return a monitor update");
+
                Ok((shutdown, monitor_update, dropped_outbound_htlcs))
        }
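
On the `ChannelManager` side (diff below), call sites collapse into
a single flow: hand the borrowed update to the chain monitor and
branch once on the persister's status. A hedged sketch of that flow
with stub types (the real `handle_new_monitor_update!` macro does
considerably more):

    // Simplified, hypothetical sketch of the caller-side pattern.
    #[derive(Debug, PartialEq)]
    enum ChannelMonitorUpdateStatus { Completed, InProgress, PermanentFailure }

    struct ChannelMonitorUpdate { update_id: u64 }

    // Stub persister: pretend persistence always completes synchronously.
    fn update_channel(_update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
        ChannelMonitorUpdateStatus::Completed
    }

    fn handle_new_monitor_update(update: &ChannelMonitorUpdate) -> Result<(), String> {
        match update_channel(update) {
            // Synchronous persistence: proceed immediately.
            ChannelMonitorUpdateStatus::Completed => Ok(()),
            // Async persistence: the Channel already paused itself and
            // will regenerate messages when the update completes.
            ChannelMonitorUpdateStatus::InProgress => Ok(()),
            // Unrecoverable persistence failure: close the channel.
            ChannelMonitorUpdateStatus::PermanentFailure =>
                Err("ChannelMonitor storage failure".to_owned()),
        }
    }

    fn main() {
        let update = ChannelMonitorUpdate { update_id: 1 };
        assert_eq!(handle_new_monitor_update(&update), Ok(()));
    }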
 
lightning/src/ln/channelmanager.rs
index ee3b50936a5e74d3b6fe84f7e89c3174e10f4f6e..675122cfb4df890b4c89ebff1b60aab95e7cf12f 100644
@@ -345,17 +345,6 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn ignore_no_close(err: String) -> Self {
-               Self {
-                       err: LightningError {
-                               err,
-                               action: msgs::ErrorAction::IgnoreError,
-                       },
-                       chan_id: None,
-                       shutdown_finish: None,
-               }
-       }
-       #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
                Self { err, chan_id: None, shutdown_finish: None }
        }
@@ -1869,25 +1858,27 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
-                                       let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let their_features = &peer_state.latest_features;
+                                       let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
+                                               .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
                                        failed_htlcs = htlcs;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
-                                               }
-                                       }
-
+                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                       // here as we don't need the monitor update to complete until we send a
+                                       // `closing_signed`, which we'll delay if we're pending a monitor update.
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                node_id: *counterparty_node_id,
-                                               msg: shutdown_msg
+                                               msg: shutdown_msg,
                                        });
 
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update_opt.take() {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                       }
+
                                        if chan_entry.get().is_shutdown() {
                                                let channel = remove_channel!(self, chan_entry);
                                                if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
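All of these call sites lean on `handle_new_monitor_update!` to turn the persistence status into a result. Its control flow, reduced to a plain function with stand-in types (the real macro also updates channel state, frees held messages, and removes the channel on failure):

        #[derive(Clone, Copy, PartialEq)]
        enum ChannelMonitorUpdateStatus { Completed, InProgress, PermanentFailure }

        fn handle_new_monitor_update(status: ChannelMonitorUpdateStatus) -> Result<(), &'static str> {
                match status {
                        // Persisting in the background: the channel stays paused (no
                        // further commitment messages go out) and the caller sees
                        // success; progress resumes via `channel_monitor_updated`.
                        ChannelMonitorUpdateStatus::InProgress => Ok(()),
                        // Persisted synchronously: anything held while this was the
                        // latest queued update can be released immediately.
                        ChannelMonitorUpdateStatus::Completed => Ok(()),
                        // Unrecoverable: the channel must be force-closed.
                        ChannelMonitorUpdateStatus::PermanentFailure => Err("permanent monitor failure"),
                }
        }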
@@ -2494,51 +2485,32 @@ where
                                if !chan.get().is_live() {
                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                }
-                               match {
-                                       break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
-                                               htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-                                                       path: path.clone(),
-                                                       session_priv: session_priv.clone(),
-                                                       first_hop_htlc_msat: htlc_msat,
-                                                       payment_id,
-                                                       payment_secret: payment_secret.clone(),
-                                                       payment_params: payment_params.clone(),
-                                               }, onion_packet, &self.logger),
-                                               chan)
-                               } {
-                                       Some((update_add, commitment_signed, monitor_update)) => {
-                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let chan_id = chan.get().channel_id();
-                                               match (update_err,
-                                                       handle_monitor_update_res!(self, update_err, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false, true))
-                                               {
-                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
-                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
-                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
-                                                               // Note that MonitorUpdateInProgress here indicates (per function
-                                                               // docs) that we will resend the commitment update once monitor
-                                                               // updating completes. Therefore, we must return an error
-                                                               // indicating that it is unsafe to retry the payment wholesale,
-                                                               // which we do in the send_payment check for
-                                                               // MonitorUpdateInProgress, below.
-                                                               return Err(APIError::MonitorUpdateInProgress);
-                                                       },
-                                                       _ => unreachable!(),
+                               let funding_txo = chan.get().get_funding_txo().unwrap();
+                               let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+                                       htlc_cltv, HTLCSource::OutboundRoute {
+                                               path: path.clone(),
+                                               session_priv: session_priv.clone(),
+                                               first_hop_htlc_msat: htlc_msat,
+                                               payment_id,
+                                               payment_secret: payment_secret.clone(),
+                                               payment_params: payment_params.clone(),
+                                       }, onion_packet, &self.logger);
+                               match break_chan_entry!(self, send_res, chan) {
+                                       Some(monitor_update) => {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
+                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+                                                       break Err(e);
+                                               }
+                                               if update_res == ChannelMonitorUpdateStatus::InProgress {
+                                                       // Note that MonitorUpdateInProgress here indicates (per function
+                                                       // docs) that we will resend the commitment update once monitor
+                                                       // updating completes. Therefore, we must return an error
+                                                       // indicating that it is unsafe to retry the payment wholesale,
+                                                       // which we do in the send_payment check for
+                                                       // MonitorUpdateInProgress, below.
+                                                       return Err(APIError::MonitorUpdateInProgress);
                                                }
-
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: path.first().unwrap().pubkey,
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: vec![update_add],
-                                                               update_fulfill_htlcs: Vec::new(),
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       },
-                                               });
                                        },
                                        None => { },
                                }
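The explicit `update_res == ChannelMonitorUpdateStatus::InProgress` check above exists because the macro reports the in-progress case as `Ok(())`; only the send path needs to surface it to the API caller. A toy model of that mapping (`map_send_result` is illustrative; `APIError::MonitorUpdateInProgress` is the real variant):

        #[derive(Debug, PartialEq)]
        enum APIError { MonitorUpdateInProgress }

        // The HTLC is already committed to the channel at this point, so an
        // in-progress persist must read as "payment pending", never "failed":
        // retrying wholesale over another path could pay twice.
        fn map_send_result(update_in_progress: bool) -> Result<(), APIError> {
                if update_in_progress {
                        return Err(APIError::MonitorUpdateInProgress);
                }
                Ok(())
        }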
@@ -4037,7 +4009,6 @@ where
 
                let per_peer_state = self.per_peer_state.read().unwrap();
                let chan_id = prev_hop.outpoint.to_channel_id();
-
                let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                        Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
                        None => None
@@ -4049,99 +4020,57 @@ where
                        )
                ).unwrap_or(None);
 
-               if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id))
-               {
-                       let counterparty_node_id = chan.get().get_counterparty_node_id();
-                       match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok(msgs_monitor_option) => {
-                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                                                       ChannelMonitorUpdateStatus::Completed => {},
-                                                       e => {
-                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
-                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
-                                                                       payment_preimage, e);
-                                                               let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
-                                                               mem::drop(peer_state_opt);
-                                                               mem::drop(per_peer_state);
-                                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                                                               return Err((counterparty_node_id, err));
-                                                       }
-                                               }
-                                               if let Some((msg, commitment_signed)) = msgs {
-                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                                       peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                               node_id: counterparty_node_id,
-                                                               updates: msgs::CommitmentUpdate {
-                                                                       update_add_htlcs: Vec::new(),
-                                                                       update_fulfill_htlcs: vec![msg],
-                                                                       update_fail_htlcs: Vec::new(),
-                                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                                       update_fee: None,
-                                                                       commitment_signed,
-                                                               }
-                                                       });
-                                               }
-                                               mem::drop(peer_state_opt);
-                                               mem::drop(per_peer_state);
-                                               self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
-                                               Ok(())
-                                       } else {
-                                               Ok(())
-                                       }
-                               },
-                               Err((e, monitor_update)) => {
-                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
-                                               ChannelMonitorUpdateStatus::Completed => {},
-                                               e => {
-                                                       // TODO: This needs to be handled somehow - if we receive a monitor update
-                                                       // with a preimage we *must* somehow manage to propagate it to the upstream
-                                                       // channel, or we must have an ability to receive the same update and try
-                                                       // again on restart.
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
-                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                               payment_preimage, e);
-                                               },
+               if let Some(mut peer_state_lock) = peer_state_opt.take() {
+                       let peer_state = &mut *peer_state_lock;
+                       if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+                               let counterparty_node_id = chan.get().get_counterparty_node_id();
+                               let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+                               if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+                                       if let Some(action) = completion_action(Some(htlc_value_msat)) {
+                                               log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+                                                       log_bytes!(chan_id), action);
+                                               peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                        }
-                                       let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
-                                       if drop {
-                                               chan.remove_entry();
+                                       let update_id = monitor_update.update_id;
+                                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+                                       let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                               peer_state, chan);
+                                       if let Err(e) = res {
+                                               // TODO: This is a *critical* error - we probably updated the outbound edge
+                                               // of the HTLC's monitor with a preimage. We should retry this monitor
+                                               // update over and over again until morale improves.
+                                               log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+                                               return Err((counterparty_node_id, e));
                                        }
-                                       mem::drop(peer_state_opt);
-                                       mem::drop(per_peer_state);
-                                       self.handle_monitor_update_completion_actions(completion_action(None));
-                                       Err((counterparty_node_id, res))
-                               },
-                       }
-               } else {
-                       let preimage_update = ChannelMonitorUpdate {
-                               update_id: CLOSED_CHANNEL_UPDATE_ID,
-                               updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                                       payment_preimage,
-                               }],
-                       };
-                       // We update the ChannelMonitor on the backward link, after
-                       // receiving an `update_fulfill_htlc` from the forward link.
-                       let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                               // TODO: This needs to be handled somehow - if we receive a monitor update
-                               // with a preimage we *must* somehow manage to propagate it to the upstream
-                               // channel, or we must have an ability to receive the same event and try
-                               // again on restart.
-                               log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                       payment_preimage, update_res);
+                               }
+                               return Ok(());
                        }
-                       mem::drop(peer_state_opt);
-                       mem::drop(per_peer_state);
-                       // Note that we do process the completion action here. This totally could be a
-                       // duplicate claim, but we have no way of knowing without interrogating the
-                       // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
-                       // generally always allowed to be duplicative (and it's specifically noted in
-                       // `PaymentForwarded`).
-                       self.handle_monitor_update_completion_actions(completion_action(None));
-                       Ok(())
                }
+               let preimage_update = ChannelMonitorUpdate {
+                       update_id: CLOSED_CHANNEL_UPDATE_ID,
+                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                               payment_preimage,
+                       }],
+               };
+               // We update the ChannelMonitor on the backward link, after
+               // receiving an `update_fulfill_htlc` from the forward link.
+               let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+               if update_res != ChannelMonitorUpdateStatus::Completed {
+                       // TODO: This needs to be handled somehow - if we receive a monitor update
+                       // with a preimage we *must* somehow manage to propagate it to the upstream
+                       // channel, or we must have an ability to receive the same event and try
+                       // again on restart.
+                       log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               payment_preimage, update_res);
+               }
+               // Note that we do process the completion action here. This totally could be a
+               // duplicate claim, but we have no way of knowing without interrogating the
+               // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+               // generally always allowed to be duplicative (and it's specifically noted in
+               // `PaymentForwarded`).
+               self.handle_monitor_update_completion_actions(completion_action(None));
+               Ok(())
        }
 
        fn finalize_claims(&self, sources: Vec<HTLCSource>) {
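The `monitor_update_blocked_actions` map used above replaces the old inline event generation: claim-related completion actions are parked per channel until the preimage update persists, then drained. The same pattern as a self-contained sketch with stand-in types:

        use std::collections::HashMap;

        type ChannelId = [u8; 32];

        #[derive(Debug)]
        enum CompletionAction { PaymentClaimed { amount_msat: u64 } }

        #[derive(Default)]
        struct BlockedActions { by_channel: HashMap<ChannelId, Vec<CompletionAction>> }

        impl BlockedActions {
                // Called while the preimage update is still persisting.
                fn block(&mut self, chan: ChannelId, action: CompletionAction) {
                        self.by_channel.entry(chan).or_insert_with(Vec::new).push(action);
                }
                // Called once persistence completes; the caller then emits events.
                // Duplicated events across restarts are acceptable, as noted above.
                fn release(&mut self, chan: ChannelId) -> Vec<CompletionAction> {
                        self.by_channel.remove(&chan).unwrap_or_default()
                }
        }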
@@ -4670,27 +4599,27 @@ where
                                                        if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
                                        }
 
-                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+                                       let funding_txo_opt = chan_entry.get().get_funding_txo();
+                                       let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
+                                               chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
                                        dropped_htlcs = htlcs;
 
-                                       // Update the monitor with the shutdown script if necessary.
-                                       if let Some(monitor_update) = monitor_update {
-                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
-                                               let (result, is_permanent) =
-                                                       handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                               if is_permanent {
-                                                       remove_channel!(self, chan_entry);
-                                                       break result;
-                                               }
-                                       }
-
                                        if let Some(msg) = shutdown {
+                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                               // here as we don't need the monitor update to complete until we send a
+                                               // `closing_signed`, which we'll delay if we're pending a monitor update.
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: *counterparty_node_id,
                                                        msg,
                                                });
                                        }
 
+                                       // Update the monitor with the shutdown script if necessary.
+                                       if let Some(monitor_update) = monitor_update_opt {
+                                               let update_id = monitor_update.update_id;
+                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                       }
                                        break Ok(());
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -4702,8 +4631,7 @@ where
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
 
-               let _ = handle_error!(self, result, *counterparty_node_id);
-               Ok(())
+               result
        }
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
@@ -4877,40 +4805,12 @@ where
                let peer_state = &mut *peer_state_lock;
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
-                               let (revoke_and_ack, commitment_signed, monitor_update) =
-                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
-                                               Err((None, e)) => try_chan_entry!(self, Err(e), chan),
-                                               Err((Some(update), e)) => {
-                                                       assert!(chan.get().is_awaiting_monitor_update());
-                                                       let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
-                                                       try_chan_entry!(self, Err(e), chan);
-                                                       unreachable!();
-                                               },
-                                               Ok(res) => res
-                                       };
-                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
-                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
-                                       return Err(e);
-                               }
-
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-                                       node_id: counterparty_node_id.clone(),
-                                       msg: revoke_and_ack,
-                               });
-                               if let Some(msg) = commitment_signed {
-                                       peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: counterparty_node_id.clone(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: None,
-                                                       commitment_signed: msg,
-                                               },
-                                       });
-                               }
-                               Ok(())
+                               let funding_txo = chan.get().get_funding_txo();
+                               let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
+                               let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+                               let update_id = monitor_update.update_id;
+                               handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                       peer_state, chan)
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
@@ -5009,8 +4909,7 @@ where
        }
 
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-               let mut htlcs_to_fail = Vec::new();
-               let res = loop {
+               let (htlcs_to_fail, res) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -5021,59 +4920,19 @@ where
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
-                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
-                                       let raa_updates = break_chan_entry!(self,
-                                               chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
-                                       htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
-                                       if was_paused_for_mon_update {
-                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
-                                               assert!(raa_updates.commitment_update.is_none());
-                                               assert!(raa_updates.accepted_htlcs.is_empty());
-                                               assert!(raa_updates.failed_htlcs.is_empty());
-                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
-                                       }
-                                       if update_res != ChannelMonitorUpdateStatus::Completed {
-                                               if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
-                                                               RAACommitmentOrder::CommitmentFirst, false,
-                                                               raa_updates.commitment_update.is_some(), false,
-                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                               raa_updates.finalized_claimed_htlcs) {
-                                                       break Err(e);
-                                               } else { unreachable!(); }
-                                       }
-                                       if let Some(updates) = raa_updates.commitment_update {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       updates,
-                                               });
-                                       }
-                                       break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                       raa_updates.finalized_claimed_htlcs,
-                                                       chan.get().get_short_channel_id()
-                                                               .unwrap_or(chan.get().outbound_scid_alias()),
-                                                       chan.get().get_funding_txo().unwrap(),
-                                                       chan.get().get_user_id()))
+                                       let funding_txo = chan.get().get_funding_txo();
+                                       let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+                                       let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
+                                       let update_id = monitor_update.update_id;
+                                       let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                               peer_state, chan);
+                                       (htlcs_to_fail, res)
                                },
-                               hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
-               match res {
-                       Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
-                               short_channel_id, channel_outpoint, user_channel_id)) =>
-                       {
-                               for failure in pending_failures.drain(..) {
-                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
-                                       self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
-                               }
-                               self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, user_channel_id, pending_forwards)]);
-                               self.finalize_claims(finalized_claim_htlcs);
-                               Ok(())
-                       },
-                       Err(e) => Err(e)
-               }
+               res
        }
 
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
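Note how far the `revoke_and_ack` handler shrinks: accepted, failed, and finalized HTLCs no longer round-trip through its return value but are instead surfaced once the queued update completes. Its contract is now roughly the following illustrative (not literal) shape; the real method returns a borrow of the queued update:

        struct ChannelMonitorUpdate { update_id: u64 }
        struct HTLCSource;
        struct PaymentHash(pub [u8; 32]);
        struct ChannelError;

        trait RevokeAndAckShape {
                // Holding-cell HTLCs to fail right away, plus the update to
                // persist; commitment messages are regenerated after completion.
                fn revoke_and_ack(&mut self)
                        -> Result<(Vec<(HTLCSource, PaymentHash)>, ChannelMonitorUpdate), ChannelError>;
        }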
@@ -5315,49 +5174,37 @@ where
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
                let mut handle_errors = Vec::new();
-               {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
+               let per_peer_state = self.per_peer_state.read().unwrap();
 
-                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                       'chan_loop: loop {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               let pending_msg_events = &mut peer_state.pending_msg_events;
-                               peer_state.channel_by_id.retain(|channel_id, chan| {
-                                       match chan.maybe_free_holding_cell_htlcs(&self.logger) {
-                                               Ok((commitment_opt, holding_cell_failed_htlcs)) => {
-                                                       if !holding_cell_failed_htlcs.is_empty() {
-                                                               failed_htlcs.push((
-                                                                       holding_cell_failed_htlcs,
-                                                                       *channel_id,
-                                                                       chan.get_counterparty_node_id()
-                                                               ));
-                                                       }
-                                                       if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
-                                                                       ChannelMonitorUpdateStatus::Completed => {
-                                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                                       node_id: chan.get_counterparty_node_id(),
-                                                                                       updates: commitment_update,
-                                                                               });
-                                                                       },
-                                                                       e => {
-                                                                               has_monitor_update = true;
-                                                                               let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                                               if close_channel { return false; }
-                                                                       },
-                                                               }
-                                                       }
-                                                       true
-                                               },
-                                               Err(e) => {
-                                                       let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
-                                                       handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
-                                                       // ChannelClosed event is generated by handle_error for us
-                                                       !close_channel
+                               let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+                               for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                       let counterparty_node_id = chan.get_counterparty_node_id();
+                                       let funding_txo = chan.get_funding_txo();
+                                       let (monitor_opt, holding_cell_failed_htlcs) =
+                                               chan.maybe_free_holding_cell_htlcs(&self.logger);
+                                       if !holding_cell_failed_htlcs.is_empty() {
+                                               failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+                                       }
+                                       if let Some(monitor_update) = monitor_opt {
+                                               has_monitor_update = true;
+
+                                               let update_res = self.chain_monitor.update_channel(
+                                                       funding_txo.expect("channel is live"), monitor_update);
+                                               let update_id = monitor_update.update_id;
+                                               let channel_id: [u8; 32] = *channel_id;
+                                               let res = handle_new_monitor_update!(self, update_res, update_id,
+                                                       peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
+                                                       peer_state.channel_by_id.remove(&channel_id));
+                                               if res.is_err() {
+                                                       handle_errors.push((counterparty_node_id, res));
                                                }
+                                               continue 'chan_loop;
                                        }
-                               });
+                               }
+                               break 'chan_loop;
                        }
                }
 
@@ -7124,8 +6971,6 @@ where
                        // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
                        // map. Thus, if there are no entries we skip writing a TLV for it.
                        pending_claiming_payments = None;
-               } else {
-                       debug_assert!(false, "While we have code to serialize pending_claiming_payments, the map should always be empty until a later PR");
                }
 
                write_tlv_fields!(writer, {
index 3da49ca7dc8d6db2a2a3942c42c1a43e2355351b..fbd0e133fe50923a60f2b9764825cf9d92e3a1b3 100644 (file)
@@ -3618,22 +3618,22 @@ fn test_simple_peer_disconnect() {
                        _ => panic!("Unexpected event"),
                }
                match events[1] {
+                       Event::PaymentPathSuccessful { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+               match events[2] {
                        Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                                assert!(payment_failed_permanently);
                        },
                        _ => panic!("Unexpected event"),
                }
-               match events[2] {
+               match events[3] {
                        Event::PaymentFailed { payment_hash, .. } => {
                                assert_eq!(payment_hash, payment_hash_5);
                        },
                        _ => panic!("Unexpected event"),
                }
-               match events[3] {
-                       Event::PaymentPathSuccessful { .. } => {},
-                       _ => panic!("Unexpected event"),
-               }
        }
 
        claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
@@ -8186,7 +8186,7 @@ fn test_update_err_monitor_lockdown() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
                        assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                } else { assert!(false); }
@@ -8280,7 +8280,7 @@ fn test_concurrent_monitor_claim() {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-               if let Ok((_, _, update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+               if let Ok(update) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                        // Watchtower Alice should already have seen the block and reject the update
                        assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
                        assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);