Replace `send_htlc` amount checking with available balances
[rust-lightning] / lightning / src / ln / channel.rs
index cc3f73523a379d17dd2a417567a90bfed70a0bde..577223c63826a0eecc3406e7a9f0f9de8276bf02 100644
@@ -25,7 +25,7 @@ use bitcoin::secp256k1;
 use crate::ln::{PaymentPreimage, PaymentHash};
 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect};
+use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
@@ -35,7 +35,7 @@ use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
@@ -73,6 +73,8 @@ pub struct AvailableBalances {
        pub outbound_capacity_msat: u64,
        /// The maximum value we can assign to the next outbound HTLC
        pub next_outbound_htlc_limit_msat: u64,
+       /// The minimum value we can assign to the next outbound HTLC
+       pub next_outbound_htlc_minimum_msat: u64,
 }
 
 #[derive(Debug, Clone, Copy, PartialEq)]
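
A note on the new field: the two limits together define the window a router must hit: an HTLC amount is sendable only if it lies between next_outbound_htlc_minimum_msat and next_outbound_htlc_limit_msat. A minimal sketch of clamping a desired amount into that window (the helper is hypothetical; only the two struct fields come from this patch):

    // Clamp `desired_msat` into the sendable window the channel advertises, or
    // return None when no portion of the desired amount can be sent.
    fn clamp_to_sendable(balances: &AvailableBalances, desired_msat: u64) -> Option<u64> {
        let amt = desired_msat.min(balances.next_outbound_htlc_limit_msat);
        if amt >= balances.next_outbound_htlc_minimum_msat { Some(amt) } else { None }
    }
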
@@ -479,6 +481,21 @@ pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
 
+struct PendingChannelMonitorUpdate {
+       update: ChannelMonitorUpdate,
+       /// In some cases we need to delay releasing the [`ChannelMonitorUpdate`] until after an
+       /// `Event` is processed by the user. This bool indicates that the [`ChannelMonitorUpdate`] is
+       /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
+       ///
+       /// [`ChannelManager`]: super::channelmanager::ChannelManager
+       blocked: bool,
+}
+
+impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
+       (0, update, required),
+       (2, blocked, required),
+});
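
The queue these entries live in keeps every released (unblocked) update ahead of every blocked one. A standalone sketch of that invariant as a check (illustrative, not part of the patch):

    // Unblocked updates must form a strict prefix of the queue: once an entry is
    // blocked, every entry after it must be blocked too.
    fn queue_invariant_holds(queue: &[PendingChannelMonitorUpdate]) -> bool {
        let first_blocked = queue.iter().position(|upd| upd.blocked).unwrap_or(queue.len());
        queue[first_blocked..].iter().all(|upd| upd.blocked)
    }
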
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -744,7 +761,7 @@ pub(super) struct Channel<Signer: ChannelSigner> {
        /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
        /// completes we still need to be able to complete the persistence. Thus, we have to keep a
        /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
-       pending_monitor_updates: Vec<ChannelMonitorUpdate>,
+       pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
 }
 
 #[cfg(any(test, fuzzing))]
@@ -986,7 +1003,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 
                let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-                       Some(signer_provider.get_shutdown_scriptpubkey())
+                       match signer_provider.get_shutdown_scriptpubkey() {
+                               Ok(scriptpubkey) => Some(scriptpubkey),
+                               Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+                       }
                } else { None };
 
                if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
@@ -995,6 +1015,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
+               let destination_script = match signer_provider.get_destination_script() {
+                       Ok(script) => script,
+                       Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+               };
+
                let temporary_channel_id = entropy_source.get_secure_random_bytes();
 
                Ok(Channel {
@@ -1021,7 +1046,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                        holder_signer,
                        shutdown_scriptpubkey,
-                       destination_script: signer_provider.get_destination_script(),
+                       destination_script,
 
                        cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
@@ -1314,7 +1339,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
                        match &msg.shutdown_scriptpubkey {
-                               &OptionalField::Present(ref script) => {
+                               &Some(ref script) => {
                                        // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
                                        if script.len() == 0 {
                                                None
@@ -1326,14 +1351,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        }
                                },
                                // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, we fail the channel
-                               &OptionalField::Absent => {
+                               &None => {
                                        return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
                } else { None };
 
                let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-                       Some(signer_provider.get_shutdown_scriptpubkey())
+                       match signer_provider.get_shutdown_scriptpubkey() {
+                               Ok(scriptpubkey) => Some(scriptpubkey),
+                               Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+                       }
                } else { None };
 
                if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
@@ -1342,6 +1370,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
+               let destination_script = match signer_provider.get_destination_script() {
+                       Ok(script) => script,
+                       Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+               };
+
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 
@@ -1368,7 +1401,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                        holder_signer,
                        shutdown_scriptpubkey,
-                       destination_script: signer_provider.get_destination_script(),
+                       destination_script,
 
                        cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
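
Both constructors now treat the signer as fallible instead of unwrapping script fetches. A sketch of the shared pattern, assuming SignerProvider methods that return Result as in this patch (the generic helper itself is hypothetical):

    // Map a fallible signer call onto whatever error type the call site needs,
    // rather than panicking inside channel setup.
    fn required_script<T, E>(res: Result<T, ()>, make_err: impl FnOnce(String) -> E,
        what: &str) -> Result<T, E>
    {
        res.map_err(|_| make_err(format!("Failed to get {}", what)))
    }
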
@@ -1979,28 +2012,52 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+               let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
-                               let mut additional_update = self.build_commitment_no_status_check(logger);
-                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
-                               // strictly increasing by one, so decrement it here.
-                               self.latest_monitor_update_id = monitor_update.update_id;
-                               monitor_update.updates.append(&mut additional_update.updates);
-                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
-                               self.pending_monitor_updates.push(monitor_update);
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+                               // Even if we aren't supposed to let new monitor updates with commitment state
+                               // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+                               // matter what. Sadly, to push a new monitor update which lands before others
+                               // already queued, we have to insert it into the pending queue and update the
+                               // update_ids of all the following monitor updates.
+                               let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+                                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                                       // build_commitment_no_status_check may bump latest_monitor_id but we want them
+                                       // to be strictly increasing by one, so decrement it here.
+                                       self.latest_monitor_update_id = monitor_update.update_id;
+                                       monitor_update.updates.append(&mut additional_update.updates);
+                                       self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+                                               update: monitor_update, blocked: false,
+                                       });
+                                       self.pending_monitor_updates.len() - 1
+                               } else {
+                                       let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked)
+                                               .unwrap_or(self.pending_monitor_updates.len());
+                                       let new_mon_id = self.pending_monitor_updates.get(insert_pos)
+                                               .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+                                       monitor_update.update_id = new_mon_id;
+                                       self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+                                               update: monitor_update, blocked: false,
+                                       });
+                                       for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+                                               held_update.update.update_id += 1;
+                                       }
+                                       if msg.is_some() {
+                                               debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+                                               let update = self.build_commitment_no_status_check(logger);
+                                               self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+                                                       update, blocked: true,
+                                               });
+                                       }
+                                       insert_pos
+                               };
+                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
                                UpdateFulfillCommitFetch::NewClaim {
-                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
+                                       monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos)
+                                               .expect("We just pushed the monitor update").update,
                                        htlc_value_msat,
                                }
                        },
-                       UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
-                               self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                               self.pending_monitor_updates.push(monitor_update);
-                               UpdateFulfillCommitFetch::NewClaim {
-                                       monitor_update: self.pending_monitor_updates.last().unwrap(),
-                                       htlc_value_msat,
-                               }
-                       }
                        UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
                }
        }
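
The insert-and-renumber step above is the subtle part: a preimage update may jump ahead of blocked updates, so update_ids must be rewritten to stay strictly increasing. A standalone model of just that step (the Update struct is hypothetical):

    struct Update { update_id: u64, blocked: bool }

    // Insert `new` just before the first blocked entry, give it that entry's id,
    // and bump every later id by one so ids remain strictly increasing.
    fn insert_before_blocked(queue: &mut Vec<Update>, mut new: Update) -> usize {
        let pos = queue.iter().position(|upd| upd.blocked).unwrap_or(queue.len());
        if let Some(upd) = queue.get(pos) { new.update_id = upd.update_id; }
        queue.insert(pos, new);
        for upd in queue.iter_mut().skip(pos + 1) { upd.update_id += 1; }
        pos
    }
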
@@ -2191,7 +2248,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
                        match &msg.shutdown_scriptpubkey {
-                               &OptionalField::Present(ref script) => {
+                               &Some(ref script) => {
                                        // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
                                        if script.len() == 0 {
                                                None
@@ -2203,7 +2260,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        }
                                },
                                // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, we fail the channel
-                               &OptionalField::Absent => {
+                               &None => {
                                        return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
@@ -2621,6 +2678,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// corner case properly.
        pub fn get_available_balances(&self) -> AvailableBalances {
                // Note that we have to handle overflow due to the above case.
+               let inbound_stats = self.get_inbound_pending_htlc_stats(None);
                let outbound_stats = self.get_outbound_pending_htlc_stats(None);
 
                let mut balance_msat = self.value_to_self_msat;
@@ -2631,10 +2689,112 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
                balance_msat -= outbound_stats.pending_htlcs_value_msat;
 
-               let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64
-                               - outbound_stats.pending_htlcs_value_msat as i64
-                               - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
-                       0) as u64;
+               let outbound_capacity_msat = self.value_to_self_msat
+                               .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+                               .saturating_sub(
+                                       self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
+
+               let mut available_capacity_msat = outbound_capacity_msat;
+
+               if self.is_outbound() {
+                       // We must account for the channel commitment tx fee when computing how much of the
+                       // available capacity can be used for the next HTLC. Mirrors the logic in send_htlc.
+                       //
+                       // The fee depends on whether the amount we will be sending is above dust or not,
+                       // and the answer will in turn change the amount itself, making it a circular
+                       // dependency.
+                       // This complicates the computation around dust values, up to the value of a single HTLC.
+                       let mut real_dust_limit_timeout_sat = self.holder_dust_limit_satoshis;
+                       if !self.opt_anchors() {
+                               real_dust_limit_timeout_sat += self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
+                       }
+
+                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
+                       let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+                       let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
+                       let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+
+                       // We will first subtract the fee as if we were above-dust. Then, if the resulting
+                       // value ends up being below dust, we have this fee available again. In that case,
+                       // match the value to right-below-dust.
+                       let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
+                       if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
+                               let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
+                               debug_assert!(one_htlc_difference_msat != 0);
+                               capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
+                               capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
+                               available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
+                       } else {
+                               available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+                       }
+               } else {
+                       // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
+                       // sending a new HTLC won't reduce their balance below our reserve threshold.
+                       let mut real_dust_limit_success_sat = self.counterparty_dust_limit_satoshis;
+                       if !self.opt_anchors() {
+                               real_dust_limit_success_sat += self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
+                       }
+
+                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
+                       let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
+
+                       let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
+                       let remote_balance_msat = (self.channel_value_satoshis * 1000 - self.value_to_self_msat)
+                               .saturating_sub(inbound_stats.pending_htlcs_value_msat);
+
+                       if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
+                               // If another HTLC's fee would reduce the remote's balance below the reserve limit
+                               // we've selected for them, we can only send dust HTLCs.
+                               available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+                       }
+               }
+
+               let mut next_outbound_htlc_minimum_msat = self.counterparty_htlc_minimum_msat;
+
+               // If we get close to our maximum dust exposure, we end up in a situation where we can send
+               // between zero and the remaining dust exposure limit OR above the dust limit.
+               // Because we cannot express this as a simple min/max, we prefer to tell the user they can
+               // send above the dust limit (as the router can always overpay to meet the dust limit).
+               let mut remaining_msat_below_dust_exposure_limit = None;
+               let mut dust_exposure_dust_limit_msat = 0;
+
+               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
+                       (self.counterparty_dust_limit_satoshis, self.holder_dust_limit_satoshis)
+               } else {
+                       let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+                       (self.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
+                        self.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
+               };
+               let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+                       remaining_msat_below_dust_exposure_limit =
+                               Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
+               }
+
+               let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+                       remaining_msat_below_dust_exposure_limit = Some(cmp::min(
+                               remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
+                               self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
+               }
+
+               if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
+                       if available_capacity_msat < dust_exposure_dust_limit_msat {
+                               available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
+                       } else {
+                               next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
+                       }
+               }
+
+               available_capacity_msat = cmp::min(available_capacity_msat,
+                       self.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+
+               if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
+                       available_capacity_msat = 0;
+               }
+
                AvailableBalances {
                        inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
                                        - self.value_to_self_msat as i64
@@ -2642,10 +2802,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        - self.holder_selected_channel_reserve_satoshis as i64 * 1000,
                                0) as u64,
                        outbound_capacity_msat,
-                       next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
-                                       self.counterparty_max_htlc_value_in_flight_msat as i64
-                                               - outbound_stats.pending_htlcs_value_msat as i64),
-                               0) as u64,
+                       next_outbound_htlc_limit_msat: available_capacity_msat,
+                       next_outbound_htlc_minimum_msat,
                        balance_msat,
                }
        }
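
A worked example of the saturating arithmetic at the top of this function, with illustrative numbers only: a 1,000,000 msat balance, 300,000 msat already pending outbound, and a 700 sat counterparty-selected reserve leave exactly zero outbound capacity rather than underflowing:

    fn main() {
        let value_to_self_msat: u64 = 1_000_000;
        let pending_outbound_msat: u64 = 300_000;
        let reserve_msat: u64 = 700 * 1000;
        // saturating_sub pins the result at zero instead of wrapping when pending
        // HTLCs plus the reserve exceed our current balance.
        let outbound_capacity_msat = value_to_self_msat
            .saturating_sub(pending_outbound_msat)
            .saturating_sub(reserve_msat);
        assert_eq!(outbound_capacity_msat, 0);
    }
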
@@ -3068,7 +3226,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(())
        }
 
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
                where L::Target: Logger
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3268,8 +3426,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                        log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
                                log_bytes!(self.channel_id));
-                       self.pending_monitor_updates.push(monitor_update);
-                       return Ok(self.pending_monitor_updates.last().unwrap());
+                       return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
                let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
@@ -3286,9 +3443,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
                        log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
-               self.pending_monitor_updates.push(monitor_update);
                self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
-               return Ok(self.pending_monitor_updates.last().unwrap());
+               return Ok(self.push_ret_blockable_mon_update(monitor_update));
        }
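
Callers must now handle the None case, where the update was queued but held back. A sketch of the caller-side pattern (the channel, chain_monitor, and funding_txo handles are assumptions):

    // Some(update): hand it to the chain::Watch implementation for persistence now.
    // None: the update sits blocked in the channel's queue and will surface later
    // through unblock_next_blocked_monitor_update.
    match channel.commitment_signed(&msg, &logger)? {
        Some(monitor_update) => { chain_monitor.update_channel(funding_txo, monitor_update); },
        None => {}, // blocked; nothing to persist yet
    }
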
 
        /// Public version of the below, checking relevant preconditions first.
@@ -3403,8 +3559,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 
                        self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
-                       self.pending_monitor_updates.push(monitor_update);
-                       (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
+                       (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
                } else {
                        (None, Vec::new())
                }
@@ -3415,7 +3570,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
+       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
                where L::Target: Logger,
        {
                if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3612,21 +3767,19 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
                        self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
-                       self.pending_monitor_updates.push(monitor_update);
-                       return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
+                       return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
                }
 
                match self.free_holding_cell_htlcs(logger) {
                        (Some(_), htlcs_to_fail) => {
-                               let mut additional_update = self.pending_monitor_updates.pop().unwrap();
+                               let mut additional_update = self.pending_monitor_updates.pop().unwrap().update;
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
 
                                self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                               self.pending_monitor_updates.push(monitor_update);
-                               Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+                               Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
                        },
                        (None, htlcs_to_fail) => {
                                if require_commitment {
@@ -3640,13 +3793,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
                                                log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
                                        self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                                       self.pending_monitor_updates.push(monitor_update);
-                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
                                } else {
                                        log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
                                        self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                                       self.pending_monitor_updates.push(monitor_update);
-                                       Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
                                }
                        }
                }
@@ -3835,7 +3986,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
-               self.pending_monitor_updates.clear();
+               let mut found_blocked = false;
+               self.pending_monitor_updates.retain(|upd| {
+                       if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
+                       if upd.blocked { found_blocked = true; }
+                       upd.blocked
+               });
 
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
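
A compact model of the retain above: when persistence completes, the released prefix is dropped and only blocked entries survive, preserving the prefix invariant (standalone sketch reusing the PendingChannelMonitorUpdate shape):

    fn drop_completed_prefix(queue: &mut Vec<PendingChannelMonitorUpdate>) {
        // Everything after the first blocked entry must itself be blocked.
        debug_assert!(queue.iter().skip_while(|upd| !upd.blocked).all(|upd| upd.blocked));
        // The unblocked prefix has been persisted; keep only blocked entries.
        queue.retain(|upd| upd.blocked);
    }
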
@@ -4039,36 +4195,31 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned()));
+                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
                }
 
                if msg.next_remote_commitment_number > 0 {
-                       match msg.data_loss_protect {
-                               OptionalField::Present(ref data_loss) => {
-                                       let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
-                                       let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret)
-                                               .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
-                                       if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
-                                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+                       let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
+                       let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+                               .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+                       if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
+                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+                       }
+                       if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
+                               macro_rules! log_and_panic {
+                                       ($err_msg: expr) => {
+                                               log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+                                               panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
                                        }
-                                       if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
-                                               macro_rules! log_and_panic {
-                                                       ($err_msg: expr) => {
-                                                               log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
-                                                               panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
-                                                       }
-                                               }
-                                               log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
-                                                       This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
-                                                       More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
-                                                       If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
-                                                       ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
-                                                       ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
-                                                       Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
-                                                       See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
-                                       }
-                               },
-                               OptionalField::Absent => {}
+                               }
+                               log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+                                       This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+                                       More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+                                       If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+                                       ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+                                       ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+                                       Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+                                       See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
                        }
                }
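
With data_loss_protect now a mandatory part of channel_reestablish, the check above always runs. A minimal standalone sketch of the verification it performs (the secp context and inputs are assumptions):

    use bitcoin::secp256k1::{All, PublicKey, Secp256k1, SecretKey};

    // True if the secret the peer echoed back derives the per-commitment point we
    // originally handed out at the claimed commitment height.
    fn peer_secret_matches(secp_ctx: &Secp256k1<All>, expected_point: &PublicKey,
        claimed_secret: &[u8; 32]) -> bool
    {
        match SecretKey::from_slice(claimed_secret) {
            Ok(secret) => PublicKey::from_secret_key(secp_ctx, &secret) == *expected_point,
            Err(_) => false,
        }
    }
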
 
@@ -4355,7 +4506,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        Some(_) => false,
                        None => {
                                assert!(send_shutdown);
-                               let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+                               let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+                                       Ok(scriptpubkey) => scriptpubkey,
+                                       Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+                               };
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
                                        return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
                                }
@@ -4378,8 +4532,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       self.pending_monitor_updates.push(monitor_update);
-                       Some(self.pending_monitor_updates.last().unwrap())
+                       if self.push_blockable_mon_update(monitor_update) {
+                               self.pending_monitor_updates.last().map(|upd| &upd.update)
+                       } else { None }
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
@@ -4951,8 +5106,49 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
        }
 
-       pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
-               self.pending_monitor_updates.first()
+       pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
+               if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); }
+               self.pending_monitor_updates[0].update.update_id - 1
+       }
+
+       /// Unblocks and returns the next blocked monitor update, if one exists, along with a bool
+       /// indicating whether a further blocked monitor update remains queued after it.
+       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
+               for i in 0..self.pending_monitor_updates.len() {
+                       if self.pending_monitor_updates[i].blocked {
+                               self.pending_monitor_updates[i].blocked = false;
+                               return Some((&self.pending_monitor_updates[i].update,
+                                       self.pending_monitor_updates.len() > i + 1));
+                       }
+               }
+               None
+       }
+
+       /// Pushes a new monitor update into our monitor update queue, returning whether it should be
+       /// immediately given to the user for persistence or held as blocked.
+       fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
+               let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+               self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+                       update, blocked: !release_monitor
+               });
+               release_monitor
+       }
+
+       /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
+       /// it should be immediately given to the user for persistence, or `None` if it should be held
+       /// as blocked.
+       fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+       -> Option<&ChannelMonitorUpdate> {
+               let release_monitor = self.push_blockable_mon_update(update);
+               if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
+       }
+
+       pub fn no_monitor_updates_pending(&self) -> bool {
+               self.pending_monitor_updates.is_empty()
+       }
+
+       pub fn complete_one_mon_update(&mut self, update_id: u64) {
+               self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
        }
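
Taken together these helpers give a pending update a simple lifecycle: queued (possibly blocked), released, completed. A sketch of the driver a manager might run once an Event is handled (the persist hook is hypothetical):

    // Release the next held update, persist it, then drop it from the queue.
    if let Some((update, more_blocked)) = channel.unblock_next_blocked_monitor_update() {
        let update_id = update.update_id;
        persist(update); // hypothetical persistence hook
        channel.complete_one_mon_update(update_id);
        if more_blocked { /* another blocked update remains; repeat after the next event */ }
    }
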
 
        /// Returns true if funding_created was sent/received.
@@ -5323,7 +5519,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        htlc_basepoint: keys.htlc_basepoint,
                        first_per_commitment_point,
                        channel_flags: if self.config.announced_channel {1} else {0},
-                       shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+                       shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
                                Some(script) => script.clone().into_inner(),
                                None => Builder::new().into_script(),
                        }),
@@ -5389,7 +5585,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        delayed_payment_basepoint: keys.delayed_payment_basepoint,
                        htlc_basepoint: keys.htlc_basepoint,
                        first_per_commitment_point,
-                       shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+                       shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
                                Some(script) => script.clone().into_inner(),
                                None => Builder::new().into_script(),
                        }),
@@ -5651,19 +5847,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // valid, and valid in fuzzing mode's arbitrary validity criteria:
                let mut pk = [2; 33]; pk[1] = 0xff;
                let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
-               let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+               let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
                        let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
                        log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
-                       OptionalField::Present(DataLossProtect {
-                               your_last_per_commitment_secret: remote_last_secret,
-                               my_current_per_commitment_point: dummy_pubkey
-                       })
+                       remote_last_secret
                } else {
                        log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
-                       OptionalField::Present(DataLossProtect {
-                               your_last_per_commitment_secret: [0;32],
-                               my_current_per_commitment_point: dummy_pubkey,
-                       })
+                       [0;32]
                };
                msgs::ChannelReestablish {
                        channel_id: self.channel_id(),
@@ -5685,7 +5875,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
                        // overflow here.
                        next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1,
-                       data_loss_protect,
+                       your_last_per_commitment_secret: remote_last_secret,
+                       my_current_per_commitment_point: dummy_pubkey,
+                       // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+                       // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+                       // txid of that interactive transaction, else we MUST NOT set it.
+                       next_funding_txid: None,
                }
        }
 
@@ -5741,8 +5936,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
                }
 
-               if amount_msat < self.counterparty_htlc_minimum_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
+               let available_balances = self.get_available_balances();
+               if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+                               available_balances.next_outbound_htlc_minimum_msat)));
+               }
+
+               if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+                               available_balances.next_outbound_htlc_limit_msat)));
                }
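
These two checks subsume the individual reserve, fee, in-flight, and dust checks deleted further down: any amount inside the advertised window is sendable. A property a caller could use to pre-filter candidate amounts (the helper is hypothetical):

    // Mirrors send_htlc's new amount validation: the amount must land inside
    // [next_outbound_htlc_minimum_msat, next_outbound_htlc_limit_msat].
    fn amount_in_window(balances: &AvailableBalances, amount_msat: u64) -> bool {
        amount_msat >= balances.next_outbound_htlc_minimum_msat
            && amount_msat <= balances.next_outbound_htlc_limit_msat
    }
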
 
                if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
@@ -5755,75 +5957,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let inbound_stats = self.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = self.get_outbound_pending_htlc_stats(None);
-               if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
-                       return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs)));
-               }
-               // Check their_max_htlc_value_in_flight_msat
-               if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
-               }
+               let need_holding_cell = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+               log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+                       if force_holding_cell { "into holding cell" }
+                       else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+                       else { "to peer" });
 
-               let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
-               let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
-               if !self.is_outbound() {
-                       // Check that we won't violate the remote channel reserve by adding this HTLC.
-                       let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
-                       let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
-                       let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
-                       if commitment_stats.remote_balance_msat < counterparty_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
-                               return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned()));
-                       }
-               }
-
-               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
-                       (0, 0)
-               } else {
-                       let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
-                       (dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
-                               dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
-               };
-               let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis;
-               if amount_msat / 1000 < exposure_dust_limit_success_sats {
-                       let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat;
-                       if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
-                               return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
-                       }
-               }
-
-               let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
-               if amount_msat / 1000 <  exposure_dust_limit_timeout_sats {
-                       let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat;
-                       if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
-                               return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
-                       }
-               }
-
-               let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
-               if holder_balance_msat < amount_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, holder_balance_msat)));
-               }
-
-               // `2 *` and extra HTLC are for the fee spike buffer.
-               let commit_tx_fee_msat = if self.is_outbound() {
-                       let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
-                       FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
-               } else { 0 };
-               if holder_balance_msat - amount_msat < commit_tx_fee_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", holder_balance_msat, commit_tx_fee_msat)));
-               }
-
-               // Check self.counterparty_selected_channel_reserve_satoshis (the amount we must keep as
-               // reserve for the remote to have something to claim if we misbehave)
-               let chan_reserve_msat = self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000;
-               if holder_balance_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", chan_reserve_msat)));
-               }
-
-               if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+               if need_holding_cell {
                        force_holding_cell = true;
                }
 
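The removed block above is the heart of this change: the per-HTLC affordability checks (counterparty reserve, dust exposure on both commitments, fee spike buffer, and our own reserve) move out of `send_htlc` and into the available-balance computation. A minimal sketch of what the remaining check in `send_htlc` can then look like, assuming a `get_available_balances()` helper that already folds those constraints into the `next_outbound_htlc_limit_msat` and new `next_outbound_htlc_minimum_msat` fields; the error strings here are illustrative, not necessarily the exact upstream wording:

		// Sketch only: validate the amount against precomputed bounds instead
		// of re-deriving each reserve/dust/fee constraint inline.
		let available_balances = self.get_available_balances();
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!(
				"Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}
		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!(
				"Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}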
@@ -6000,8 +6140,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        Some(_) => {
                                let monitor_update = self.build_commitment_no_status_check(logger);
                                self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
-                               self.pending_monitor_updates.push(monitor_update);
-                               Ok(Some(self.pending_monitor_updates.last().unwrap()))
+                               Ok(self.push_ret_blockable_mon_update(monitor_update))
                        },
                        None => Ok(None)
                }
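For context, a sketch of what the two queueing helpers used here and in `get_shutdown` below could look like, assuming `pending_monitor_updates` now stores each `ChannelMonitorUpdate` together with a `blocked` flag: a new update may only be released to the `ChannelMonitor` if nothing queued ahead of it is still blocked. This is a hypothetical reconstruction, not necessarily the exact upstream implementation:

	fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
		// The new update is releasable only if no earlier update is blocked.
		let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
		self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
			update, blocked: !release_monitor,
		});
		release_monitor
	}

	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<&ChannelMonitorUpdate> {
		// As above, but hand back a reference to the queued update when it
		// can be given to the ChannelMonitor immediately.
		if self.push_blockable_mon_update(update) {
			self.pending_monitor_updates.last().map(|upd| &upd.update)
		} else { None }
	}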
@@ -6031,7 +6170,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
        /// [`ChannelMonitorUpdate`] will be returned.
        pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
-               target_feerate_sats_per_kw: Option<u32>)
+               target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
        -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        where SP::Target: SignerProvider {
                for htlc in self.pending_outbound_htlcs.iter() {
@@ -6047,6 +6186,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
                        }
                }
+               if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+                       return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+               }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
                if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
@@ -6062,7 +6204,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let update_shutdown_script = match self.shutdown_scriptpubkey {
                        Some(_) => false,
                        None if !chan_closed => {
-                               let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+                               // use override shutdown script if provided
+                               let shutdown_scriptpubkey = match override_shutdown_script {
+                                       Some(script) => script,
+                                       None => {
+                                               // otherwise, use the shutdown scriptpubkey provided by the signer
+                                               match signer_provider.get_shutdown_scriptpubkey() {
+                                                       Ok(scriptpubkey) => scriptpubkey,
+                                                       Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+                                               }
+                                       },
+                               };
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
                                        return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
                                }
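Taken together, the new parameter lets a caller choose the to-self script at close time, falling back to the (now fallible) signer-provided script. An illustrative call site, with hypothetical surrounding bindings (`channel`, `signer_provider`, `their_features`, `my_close_pubkey`):

	// Passing an override fails with APIMisuseError if the channel already
	// committed to a shutdown script (e.g. one set earlier).
	let close_script = ShutdownScript::new_p2wpkh_from_pubkey(my_close_pubkey);
	let (shutdown_msg, monitor_update_opt, dropped_htlcs) = channel.get_shutdown(
		&signer_provider,
		&their_features,
		None,                // target_feerate_sats_per_kw: use the default estimate
		Some(close_script),  // override_shutdown_script
	)?;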
@@ -6090,8 +6242,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       self.pending_monitor_updates.push(monitor_update);
-                       Some(self.pending_monitor_updates.last().unwrap())
+                       if self.push_blockable_mon_update(monitor_update) {
+                               self.pending_monitor_updates.last().map(|upd| &upd.update)
+                       } else { None }
                } else { None };
                let shutdown = msgs::Shutdown {
                        channel_id: self.channel_id,
@@ -6528,6 +6681,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                        (28, holder_max_accepted_htlcs, option),
                        (29, self.temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
+                       (33, self.pending_monitor_updates, vec_type),
                });
 
                Ok(())
@@ -6804,6 +6958,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut temporary_channel_id: Option<[u8; 32]> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
 
+               let mut pending_monitor_updates = Some(Vec::new());
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -6826,6 +6982,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (28, holder_max_accepted_htlcs, option),
                        (29, temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
+                       (33, pending_monitor_updates, vec_type),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -6995,7 +7152,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        channel_type: channel_type.unwrap(),
                        channel_keys_id,
 
-                       pending_monitor_updates: Vec::new(),
+                       pending_monitor_updates: pending_monitor_updates.unwrap(),
                })
        }
 }
@@ -7016,13 +7173,13 @@ mod tests {
        use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::ChannelTypeFeatures;
-       use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+       use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
        use crate::ln::script::ShutdownScript;
        use crate::ln::chan_utils;
        use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
        use crate::chain::BestBlock;
        use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
-       use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+       use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
        use crate::chain::transaction::OutPoint;
        use crate::routing::router::Path;
        use crate::util::config::UserConfig;
@@ -7087,17 +7244,17 @@ mod tests {
 
                fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
 
-               fn get_destination_script(&self) -> Script {
+               fn get_destination_script(&self) -> Result<Script, ()> {
                        let secp_ctx = Secp256k1::signing_only();
                        let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
                        let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
-                       Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+                       Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
                }
 
-               fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+               fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
                        let secp_ctx = Secp256k1::signing_only();
                        let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
-                       ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
+                       Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
                }
        }
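With both `SignerProvider` methods now fallible, the test signer simply wraps its scripts in `Ok(..)`, while real callers must handle the error path; `get_shutdown` above maps a failed `get_shutdown_scriptpubkey` to `APIError::ChannelUnavailable` rather than unwrapping.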
 
@@ -7317,12 +7474,7 @@ mod tests {
                let msg = node_b_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
-               match msg.data_loss_protect {
-                       OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
-                               assert_eq!(your_last_per_commitment_secret, [0; 32]);
-                       },
-                       _ => panic!()
-               }
+               assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
 
                // Check that the commitment point in Node A's channel_reestablish message
                // is sane.
@@ -7330,12 +7482,7 @@ mod tests {
                let msg = node_a_chan.get_channel_reestablish(&&logger);
                assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
                assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
-               match msg.data_loss_protect {
-                       OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
-                               assert_eq!(your_last_per_commitment_secret, [0; 32]);
-                       },
-                       _ => panic!()
-               }
+               assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
        }
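These test updates reflect the `data_loss_protect` fields becoming a required part of `channel_reestablish`: `your_last_per_commitment_secret` is now read directly off the message rather than through an `OptionalField` match, consistent with the import removals above.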
 
        #[test]
@@ -7517,7 +7664,7 @@ mod tests {
                }
        }
 
-       #[cfg(not(feature = "grind_signatures"))]
+       #[cfg(feature = "_test_vectors")]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::util::sighash;
@@ -7526,7 +7673,7 @@ mod tests {
                use bitcoin::hashes::hex::FromHex;
                use bitcoin::hash_types::Txid;
                use bitcoin::secp256k1::Message;
-               use crate::chain::keysinterface::EcdsaChannelSigner;
+               use crate::sign::EcdsaChannelSigner;
                use crate::ln::PaymentPreimage;
                use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
                use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
@@ -7550,6 +7697,7 @@ mod tests {
                        [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
                        10_000_000,
                        [0; 32],
+                       [0; 32],
                );
 
                assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],