Merge pull request #2362 from TheBlueMatt/2023-06-unblocked-mons-in-manager
author	Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Fri, 23 Jun 2023 20:46:26 +0000 (20:46 +0000)
committer	GitHub <noreply@github.com>
Fri, 23 Jun 2023 20:46:26 +0000 (20:46 +0000)
Move in-flight ChannelMonitorUpdates to ChannelManager
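
This merge moves tracking of in-flight (already-released) ChannelMonitorUpdates out of Channel and into the ChannelManager. Channel's pending_monitor_updates queue becomes blocked_monitor_updates and now holds only updates gated on some external event, so the per-update `blocked` flag is dropped (the queue keeps TLV type 33 on the wire, just renamed). Methods that used to return &ChannelMonitorUpdate references into that queue — get_update_fulfill_htlc_and_commit, commitment_signed, revoke_and_ack, maybe_free_holding_cell_htlcs, shutdown, get_shutdown, send_htlc_and_commit — now return the update by value for the manager to persist and track.

In rough outline, the new Channel-side queueing looks like the following. This is a minimal sketch distilled from the channel.rs diff below, with the surrounding types pared down; it is not the real API surface:

	// Sketch only: ChannelMonitorUpdate and ChannelContext are simplified here.
	struct ChannelMonitorUpdate { update_id: u64 }
	struct PendingChannelMonitorUpdate { update: ChannelMonitorUpdate }

	struct ChannelContext {
		// Updates gated on an external event, released one at a time.
		blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
	}

	impl ChannelContext {
		// If nothing is blocked, hand the update straight back to the caller
		// (now the ChannelManager) for immediate persistence; otherwise queue it.
		fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
		-> Option<ChannelMonitorUpdate> {
			if self.blocked_monitor_updates.is_empty() {
				Some(update)
			} else {
				self.blocked_monitor_updates.push(PendingChannelMonitorUpdate { update });
				None
			}
		}

		// Release the next blocked update by value, reporting whether more remain.
		fn unblock_next_blocked_monitor_update(&mut self)
		-> Option<(ChannelMonitorUpdate, bool)> {
			if self.blocked_monitor_updates.is_empty() { return None; }
			Some((self.blocked_monitor_updates.remove(0).update,
				!self.blocked_monitor_updates.is_empty()))
		}
	}

Returning updates by value means the ChannelManager no longer holds a borrow into the Channel while persisting, which is presumably what makes tracking the in-flight set in the manager practical.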

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs

diff --combined lightning/src/ln/channel.rs
index 664448f56f6a7dfbee8c23595293ad722bd3a107,69a69e7a1fa0e9b77eb085d6d76b9c4e36f47f6e..eaa2a88ccf666ababb62710193f3a3db82b867dc
@@@ -309,10 -309,10 +309,10 @@@ pub const INITIAL_COMMITMENT_NUMBER: u6
  
  pub const DEFAULT_MAX_HTLCS: u16 = 50;
  
 -pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
 +pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
        const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
        const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
 -      if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
 +      if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
  }
  
  #[cfg(not(test))]
@@@ -488,13 -488,13 +488,13 @@@ enum UpdateFulfillFetch 
  }
  
  /// The return type of get_update_fulfill_htlc_and_commit.
- pub enum UpdateFulfillCommitFetch<'a> {
+ pub enum UpdateFulfillCommitFetch {
        /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
        /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
        /// previously placed in the holding cell (and has since been removed).
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-               monitor_update: &'a ChannelMonitorUpdate,
+               monitor_update: ChannelMonitorUpdate,
                /// The value of the HTLC which was claimed, in msat.
                htlc_value_msat: u64,
        },
@@@ -588,17 -588,10 +588,10 @@@ pub(crate) const DISCONNECT_PEER_AWAITI
  
  struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
-       /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
-       /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
-       /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
-       ///
-       /// [`ChannelManager`]: super::channelmanager::ChannelManager
-       blocked: bool,
  }
  
  impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
-       (2, blocked, required),
  });
  
  /// Contains everything about the channel including state, and various flags.
@@@ -869,14 -862,16 +862,12 @@@ pub(super) struct ChannelContext<Signer
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
  
-       /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
-       /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
-       /// completes we still need to be able to complete the persistence. Thus, we have to keep a
-       /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
-       pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+       /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+       /// store it here and only release it to the `ChannelManager` once it asks for it.
+       blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
  }
  
  impl<Signer: ChannelSigner> ChannelContext<Signer> {
 -      pub(crate) fn opt_anchors(&self) -> bool {
 -              self.channel_transaction_parameters.opt_anchors.is_some()
 -      }
 -
        /// Allowed in any state (including after shutdown)
        pub fn get_update_time_counter(&self) -> u32 {
                self.update_time_counter
                        ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
                                if $outbound == local { // "offered HTLC output"
                                        let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
 -                                      let htlc_tx_fee = if self.opt_anchors() {
 +                                      let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                0
                                        } else {
 -                                              feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
 +                                              feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
                                        };
                                        if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
                                                log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
                                        }
                                } else {
                                        let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
 -                                      let htlc_tx_fee = if self.opt_anchors() {
 +                                      let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                0
                                        } else {
 -                                              feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
 +                                              feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
                                        };
                                        if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
                                                log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
                        broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
                }
  
 -              let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
 -              let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
 +              let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
 +              let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
                let (value_to_self, value_to_remote) = if self.is_outbound() {
                        (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
                } else {
                let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
                                                                             value_to_a as u64,
                                                                             value_to_b as u64,
 -                                                                           self.channel_transaction_parameters.opt_anchors.is_some(),
                                                                             funding_pubkey_a,
                                                                             funding_pubkey_b,
                                                                             keys.clone(),
                        on_holder_tx_holding_cell_htlcs_count: 0,
                };
  
 -              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
 +              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
                        let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
 -                      (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
 -                              dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
 +                      (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
 +                              dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
                };
                let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
                let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
                        on_holder_tx_holding_cell_htlcs_count: 0,
                };
  
 -              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
 +              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
                        let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
 -                      (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
 -                              dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
 +                      (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
 +                              dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
                };
                let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
                let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
                        // dependency.
                        // This complicates the computation around dust-values, up to the one-htlc-value.
                        let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
 -                      if !context.opt_anchors() {
 -                              real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
 +                      if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                              real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
                        }
  
                        let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
                        // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
                        // sending a new HTLC won't reduce their balance below our reserve threshold.
                        let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
 -                      if !context.opt_anchors() {
 -                              real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
 +                      if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                              real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
                        }
  
                        let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
                let mut remaining_msat_below_dust_exposure_limit = None;
                let mut dust_exposure_dust_limit_msat = 0;
  
 -              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
 +              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
                } else {
                        let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
 -                      (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
 -                       context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
 +                      (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
 +                       context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
                if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
                let context = &self;
                assert!(context.is_outbound());
  
 -              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
 +              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
 -                      (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
 -                              context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
 +                      (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
 +                              context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
                let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
                }
  
                let num_htlcs = included_htlcs + addl_htlcs;
 -              let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
 +              let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
                #[cfg(any(test, fuzzing))]
                {
                        let mut fee = res;
                        if fee_spike_buffer_htlc.is_some() {
 -                              fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
 +                              fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
                        }
                        let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
                                + context.holding_cell_htlc_updates.len();
                let context = &self;
                assert!(!context.is_outbound());
  
 -              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
 +              let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
 -                      (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
 -                              context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
 +                      (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
 +                              context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
                let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
                }
  
                let num_htlcs = included_htlcs + addl_htlcs;
 -              let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
 +              let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
                #[cfg(any(test, fuzzing))]
                {
                        let mut fee = res;
                        if fee_spike_buffer_htlc.is_some() {
 -                              fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
 +                              fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
                        }
                        let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
                        let commitment_tx_info = CommitmentTxInfoCached {
@@@ -1975,16 -1971,16 +1966,16 @@@ pub(crate) fn get_legacy_default_holder
  // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
  // Note that num_htlcs should not include dust HTLCs.
  #[inline]
 -fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
 -      feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
 +fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
 +      feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
  }
  
  // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
  // Note that num_htlcs should not include dust HTLCs.
 -fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
 +fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
        // Note that we need to divide before multiplying to round properly,
        // since the lowest denomination of bitcoin on-chain is the satoshi.
 -      (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 +      (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
  }
  
  // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
@@@ -2259,7 -2255,7 +2250,7 @@@ impl<Signer: WriteableEcdsaChannelSigne
        }
  
        pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
-               let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+               let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
                        UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
                                // Even if we aren't supposed to let new monitor updates with commitment state
                                // matter what. Sadly, to push a new monitor update which flies before others
                                // already queued, we have to insert it into the pending queue and update the
                                // update_ids of all the following monitors.
-                               let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+                               if release_cs_monitor && msg.is_some() {
                                        let mut additional_update = self.build_commitment_no_status_check(logger);
                                        // build_commitment_no_status_check may bump latest_monitor_id but we want them
                                        // to be strictly increasing by one, so decrement it here.
                                        self.context.latest_monitor_update_id = monitor_update.update_id;
                                        monitor_update.updates.append(&mut additional_update.updates);
-                                       self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       self.context.pending_monitor_updates.len() - 1
                                } else {
-                                       let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
-                                               .unwrap_or(self.context.pending_monitor_updates.len());
-                                       let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+                                       let new_mon_id = self.context.blocked_monitor_updates.get(0)
                                                .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
                                        monitor_update.update_id = new_mon_id;
-                                       self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+                                       for held_update in self.context.blocked_monitor_updates.iter_mut() {
                                                held_update.update.update_id += 1;
                                        }
                                        if msg.is_some() {
                                                debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
                                                let update = self.build_commitment_no_status_check(logger);
-                                               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                                       update, blocked: true,
+                                               self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                                                       update,
                                                });
                                        }
-                                       insert_pos
-                               };
-                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
-                               UpdateFulfillCommitFetch::NewClaim {
-                                       monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
-                                               .expect("We just pushed the monitor update").update,
-                                       htlc_value_msat,
                                }
+                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+                               UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
                        },
                        UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
                }
                        }
                }
  
 -              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
 +              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
                        let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
 -                      (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
 -                              dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
 +                      (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
 +                              dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
                };
                let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
                Ok(())
        }
  
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
                where L::Target: Logger
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
                for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
                        if let Some(_) = htlc.transaction_output_index {
                                let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
 -                                      self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
 -                                      false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 +                                      self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
 +                                      &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
  
 -                              let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
 -                              let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
 +                              let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
 +                              let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                                let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
        /// Public version of the below, checking relevant preconditions first.
        /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
        /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
                        self.free_holding_cell_htlcs(logger)
  
        /// Frees any pending commitment updates in the holding cell, generating the relevant messages
        /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
+       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
                where L::Target: Logger,
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
                }
  
                match self.free_holding_cell_htlcs(logger) {
-                       (Some(_), htlcs_to_fail) => {
-                               let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+                       (Some(mut additional_update), htlcs_to_fail) => {
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.context.latest_monitor_update_id = monitor_update.update_id;
                let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
                let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
                let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
 -              let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
 +              let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
                let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
                if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
                        //TODO: auto-close after a number of failures?
        {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
-               let mut found_blocked = false;
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
-                       if upd.blocked { found_blocked = true; }
-                       upd.blocked
-               });
  
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
  
        pub fn shutdown<SP: Deref>(
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-       ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+       ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        where SP::Target: SignerProvider
        {
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
+                       self.push_ret_blockable_mon_update(monitor_update)
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
                (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
        }
  
-       pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
-               if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
-               self.context.pending_monitor_updates[0].update.update_id - 1
+       /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+       pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+               if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+               self.context.blocked_monitor_updates[0].update.update_id - 1
        }
  
        /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
        /// further blocked monitor update exists after the next.
-       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
-               for i in 0..self.context.pending_monitor_updates.len() {
-                       if self.context.pending_monitor_updates[i].blocked {
-                               self.context.pending_monitor_updates[i].blocked = false;
-                               return Some((&self.context.pending_monitor_updates[i].update,
-                                       self.context.pending_monitor_updates.len() > i + 1));
-                       }
-               }
-               None
+       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+               if self.context.blocked_monitor_updates.is_empty() { return None; }
+               Some((self.context.blocked_monitor_updates.remove(0).update,
+                       !self.context.blocked_monitor_updates.is_empty()))
        }
  
-       /// Pushes a new monitor update into our monitor update queue, returning whether it should be
-       /// immediately given to the user for persisting or if it should be held as blocked.
-       fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
-               let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
-               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                       update, blocked: !release_monitor
-               });
-               release_monitor
-       }
-       /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
-       /// it should be immediately given to the user for persisting or `None` if it should be held as
-       /// blocked.
+       /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+       /// immediately given to the user for persisting or `None` if it should be held as blocked.
        fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
-       -> Option<&ChannelMonitorUpdate> {
-               let release_monitor = self.push_blockable_mon_update(update);
-               if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
-       }
-       pub fn no_monitor_updates_pending(&self) -> bool {
-               self.context.pending_monitor_updates.is_empty()
-       }
-       pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if upd.update.update_id <= update_id {
-                               assert!(!upd.blocked, "Completed update must have flown");
-                               false
-                       } else { true }
-               });
-       }
-       pub fn complete_one_mon_update(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+       -> Option<ChannelMonitorUpdate> {
+               let release_monitor = self.context.blocked_monitor_updates.is_empty();
+               if !release_monitor {
+                       self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                               update,
+                       });
+                       None
+               } else {
+                       Some(update)
+               }
        }
  
-       /// Returns an iterator over all unblocked monitor updates which have not yet completed.
-       pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
-               self.context.pending_monitor_updates.iter()
-                       .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
+       pub fn blocked_monitor_updates_pending(&self) -> usize {
+               self.context.blocked_monitor_updates.len()
        }
  
        /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
                                                && info.next_holder_htlc_id == self.context.next_holder_htlc_id
                                                && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
                                                && info.feerate == self.context.feerate_per_kw {
 -                                                      let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
 +                                                      let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
                                                        assert_eq!(actual_fee, info.fee);
                                                }
                                }
  
                        for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
                                log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
 -                                      encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
 -                                      encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
 +                                      encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
 +                                      encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
                                        log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
                                        log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
                        }
        pub fn send_htlc_and_commit<L: Deref>(
                &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
-       ) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+       ) -> Result<Option<ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
                let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
                        onion_routing_packet, false, skimmed_fee_msat, logger);
                if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
        /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        where SP::Target: SignerProvider {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
+                       self.push_ret_blockable_mon_update(monitor_update)
                } else { None };
                let shutdown = msgs::Shutdown {
                        channel_id: self.context.channel_id,
@@@ -5490,7 -5435,7 +5430,7 @@@ impl<Signer: WriteableEcdsaChannelSigne
                let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
  
                let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
 -              let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
 +              let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
                if value_to_self_msat < commitment_tx_fee {
                        return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
                }
                                        is_outbound_from_holder: true,
                                        counterparty_parameters: None,
                                        funding_outpoint: None,
 -                                      opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
 -                                      opt_non_zero_fee_anchors: None
 +                                      channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
  
                                channel_type,
                                channel_keys_id,
  
-                               pending_monitor_updates: Vec::new(),
+                               blocked_monitor_updates: Vec::new(),
                        }
                })
        }
                // whatever reason.
                if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
                        self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
 -                      assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
 -                      self.context.channel_transaction_parameters.opt_anchors = None;
 +                      assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
                } else if self.context.channel_type.supports_scid_privacy() {
                        self.context.channel_type.clear_scid_privacy();
                } else {
                        self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
                }
 +              self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
                Ok(self.get_open_channel(chain_hash))
        }
  
                        if channel_type != ChannelTypeFeatures::only_static_remote_key() {
                                return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
                        }
 -                      self.context.channel_type = channel_type;
 +                      self.context.channel_type = channel_type.clone();
 +                      self.context.channel_transaction_parameters.channel_type_features = channel_type;
                }
  
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@@ -5991,6 -5936,7 +5931,6 @@@ impl<Signer: WriteableEcdsaChannelSigne
                        }
                        channel_type
                };
 -              let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
  
                let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
                let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
                // check if the funder's amount for the initial commitment tx is sufficient
                // for full fee payment plus a few HTLCs to ensure the channel will be useful.
                let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
 -              let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
 +              let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
                if funders_amount_msat / 1000 < commitment_tx_fee {
                        return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
                }
                                                pubkeys: counterparty_pubkeys,
                                        }),
                                        funding_outpoint: None,
 -                                      opt_anchors: if opt_anchors { Some(()) } else { None },
 -                                      opt_non_zero_fee_anchors: None
 +                                      channel_type_features: channel_type.clone()
                                },
                                funding_transaction: None,
  
                                channel_type,
                                channel_keys_id,
  
-                               pending_monitor_updates: Vec::new(),
+                               blocked_monitor_updates: Vec::new(),
                        }
                };
  
@@@ -6857,7 -6804,7 +6797,7 @@@ impl<Signer: WriteableEcdsaChannelSigne
                        (28, holder_max_accepted_htlcs, option),
                        (29, self.context.temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, self.context.pending_monitor_updates, vec_type),
+                       (33, self.context.blocked_monitor_updates, vec_type),
                        (35, pending_outbound_skimmed_fees, optional_vec),
                        (37, holding_cell_skimmed_fees, optional_vec),
                });
@@@ -7081,7 -7028,7 +7021,7 @@@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> 
                        _ => return Err(DecodeError::InvalidValue),
                };
  
 -              let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
 +              let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
                let funding_transaction = Readable::read(reader)?;
  
                let counterparty_cur_commitment_point = Readable::read(reader)?;
                let mut temporary_channel_id: Option<[u8; 32]> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
  
-               let mut pending_monitor_updates = Some(Vec::new());
+               let mut blocked_monitor_updates = Some(Vec::new());
  
                let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
                let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
                        (28, holder_max_accepted_htlcs, option),
                        (29, temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, pending_monitor_updates, vec_type),
+                       (33, blocked_monitor_updates, vec_type),
                        (35, pending_outbound_skimmed_fees_opt, optional_vec),
                        (37, holding_cell_skimmed_fees_opt, optional_vec),
                });
                        return Err(DecodeError::UnknownRequiredFeature);
                }
  
 +              // ChannelTransactionParameters may have had an empty features set upon deserialization.
 +              // To account for that, we're proactively setting/overriding the field here.
 +              channel_parameters.channel_type_features = chan_features.clone();
 +
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
  
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
  
-                               pending_monitor_updates: pending_monitor_updates.unwrap(),
+                               blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        }
                })
        }
@@@ -7595,13 -7538,13 +7535,13 @@@ mod tests 
                // the dust limit check.
                let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
                let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
 -              let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors());
 +              let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
                assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
  
                // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
                // of the HTLCs are seen to be above the dust limit.
                node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
 -              let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors());
 +              let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
                let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
                let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
                let config = UserConfig::default();
                let mut chan = OutboundV1Channel::<EnforcingSigner>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
  
 -              let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors());
 -              let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors());
 +              let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
 +              let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
  
                // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
                // counted as dust when it shouldn't be.
 -              let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
 +              let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
                let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
  
                // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
 -              let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
 +              let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
                let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
                chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
  
                // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
 -              let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
 +              let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
                let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
  
                // If swapped: this HTLC would be counted as dust when it shouldn't be.
 -              let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
 +              let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
                let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
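The dust boundary exercised by these assertions reduces to: an HTLC is non-dust only if its value covers the weight-based fee of its claim transaction at the current feerate plus the relevant dust limit. A minimal sketch (253 sat/kW is this test's feerate; `htlc_tx_weight` is a stand-in for the value returned by htlc_timeout_tx_weight()/htlc_success_tx_weight()):

    fn dust_boundary_msat(feerate_per_kw: u64, htlc_tx_weight: u64, dust_limit_sat: u64) -> u64 {
        // Fee to claim the HTLC on-chain, rounded down to whole sats, plus the
        // dust limit; values above this count as full HTLCs in the commitment fee,
        // values below are trimmed to fees.
        ((feerate_per_kw * htlc_tx_weight / 1000) + dust_limit_sat) * 1000
    }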
  
                macro_rules! test_commitment {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
 -                              chan.context.channel_transaction_parameters.opt_anchors = None;
 -                              test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
 +                              chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 +                              test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
                        };
                }
  
                macro_rules! test_commitment_with_anchors {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
 -                              chan.context.channel_transaction_parameters.opt_anchors = Some(());
 -                              test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
 +                              chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 +                              test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
                        };
                }
  
                                        let ref htlc = htlcs[$htlc_idx];
                                        let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
                                                chan.context.get_counterparty_selected_contest_delay().unwrap(),
 -                                              &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
 +                                              &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
 -                                      let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
 +                                      let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                                        let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
                                        assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
  
                                        }
  
                                        let htlc_sig = htlc_sig_iter.next().unwrap();
 -                                      let num_anchors = if $opt_anchors { 2 } else { 0 };
 +                                      let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
                                        assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
  
                                        let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
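The sighash selection above is the anchors rule in miniature; a sketch, with a plain bool standing in for the supports_anchors_zero_fee_htlc_tx() check:

    use bitcoin::EcdsaSighashType;

    // Anchor-style channels sign HTLC transactions SINGLE|ANYONECANPAY so a
    // fee-bearing input can be attached at broadcast time; legacy channels
    // commit to the whole transaction with SIGHASH_ALL.
    fn htlc_sighash_type(supports_anchors_zero_fee_htlc_tx: bool) -> EcdsaSighashType {
        if supports_anchors_zero_fee_htlc_tx {
            EcdsaSighashType::SinglePlusAnyoneCanPay
        } else {
            EcdsaSighashType::All
        }
    }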
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 2185;
                chan.context.holder_dust_limit_satoshis = 2001;
 +              let cached_channel_type = chan.context.channel_type;
 +              chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
  
                test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
                                 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 3702;
                chan.context.holder_dust_limit_satoshis = 546;
 +              chan.context.channel_type = cached_channel_type.clone();
  
                test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
                                 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 3687;
                chan.context.holder_dust_limit_satoshis = 3001;
 +              chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
  
                test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
                                 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 4914;
                chan.context.holder_dust_limit_satoshis = 546;
 +              chan.context.channel_type = cached_channel_type.clone();
  
                test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
                                 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 4894;
                chan.context.holder_dust_limit_satoshis = 4001;
 +              chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
  
                test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
                                 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 9651180;
                chan.context.holder_dust_limit_satoshis = 546;
 +              chan.context.channel_type = cached_channel_type.clone();
  
                test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
                                 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 6216010;
                chan.context.holder_dust_limit_satoshis = 4001;
 +              chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
  
                test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
                                 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 9651936;
                chan.context.holder_dust_limit_satoshis = 546;
 +              chan.context.channel_type = cached_channel_type;
  
                test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
                                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
                                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
                } );
  
 +              chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
                test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
                                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
                let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
                let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
                let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
 -              assert!(simple_anchors_init.requires_unknown_bits());
 -              assert!(simple_anchors_channel_type.requires_unknown_bits());
 +              assert!(!simple_anchors_init.requires_unknown_bits());
 +              assert!(!simple_anchors_channel_type.requires_unknown_bits());
  
                // First, we'll try to open a channel between A and B where A requests a channel type for
                // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
index eb682c34e01e9ad0e5484c1b2c8b8e3a1a81e15a,f38c6c13da54e6dde3ba09e98ea527a712a99376..36af4de279dcd976c0c4bf9ac9217cfe9dda3ae6
@@@ -633,6 -633,13 +633,13 @@@ pub(super) struct PeerState<Signer: Cha
        /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
+       /// Map from a channel's funding outpoint to pending [`ChannelMonitorUpdate`]s which have
+       /// been passed to the user but which have not yet completed.
+       ///
+       /// Note that the channel may no longer exist. For example, if the channel was closed but we
+       /// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
+       /// for a missing channel.
+       in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
        /// Map from a specific channel to some action(s) that should be taken when all pending
        /// [`ChannelMonitorUpdate`]s for the channel complete updating.
        ///
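A sketch of why the new map is keyed by funding outpoint rather than by live channel (stand-in types below, not the real `OutPoint`/`ChannelMonitorUpdate`): an update must be recordable even after its `Channel` is gone, e.g. a preimage claim for an HTLC resolving on-chain post-close.

    use std::collections::BTreeMap;

    type FundingOutpoint = ([u8; 32], u16); // stand-in for the funding OutPoint
    struct MonitorUpdate { update_id: u64 } // stand-in for ChannelMonitorUpdate

    // Record an in-flight update whether or not a Channel object still exists.
    fn record_in_flight(
        in_flight: &mut BTreeMap<FundingOutpoint, Vec<MonitorUpdate>>,
        funding_txo: FundingOutpoint, update: MonitorUpdate,
    ) {
        in_flight.entry(funding_txo).or_insert_with(Vec::new).push(update);
    }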
@@@ -668,6 -675,7 +675,7 @@@ impl <Signer: ChannelSigner> PeerState<
                        return false
                }
                self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+                       && self.in_flight_monitor_updates.is_empty()
        }
  
        // Returns a count of all channels we have with this peer, including pending channels.
@@@ -1811,7 -1819,7 +1819,7 @@@ macro_rules! emit_channel_ready_event 
  }
  
  macro_rules! handle_monitor_update_completion {
-       ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+       ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
                        &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
  }
  
  macro_rules! handle_new_monitor_update {
-       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
                // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
                // any case so that it won't deadlock.
                debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        log_bytes!($chan.context.channel_id()[..]));
-                               Ok(())
+                               Ok(false)
                        },
                        ChannelMonitorUpdateStatus::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
                                        log_bytes!($chan.context.channel_id()[..]));
                                update_maps_on_chan_removal!($self, &$chan.context);
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
+                               let res = Err(MsgHandleErrInternal::from_finish_shutdown(
                                        "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
                                        $chan.context.get_user_id(), $chan.context.force_shutdown(false),
                                        $self.get_channel_update_for_broadcast(&$chan).ok()));
                                res
                        },
                        ChannelMonitorUpdateStatus::Completed => {
-                               $chan.complete_one_mon_update($update_id);
-                               if $chan.no_monitor_updates_pending() {
-                                       handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
-                               }
-                               Ok(())
+                               $completed;
+                               Ok(true)
                        },
                }
        } };
-       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
-               handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
+               handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+                       $per_peer_state_lock, $chan, _internal, $remove,
+                       handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
+       };
+       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
+               handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+       };
+       ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+               let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
+                       .or_insert_with(Vec::new);
+               // During startup, we push monitor updates as background events through to here in
+               // order to replay updates that were in-flight when we shut down. Thus, we have to
+               // filter for uniqueness here.
+               let idx = in_flight_updates.iter().position(|upd| upd == &$update)
+                       .unwrap_or_else(|| {
+                               in_flight_updates.push($update);
+                               in_flight_updates.len() - 1
+                       });
+               let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
+               handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
+                       $per_peer_state_lock, $chan, _internal, $remove,
+                       {
+                               let _ = in_flight_updates.remove(idx);
+                               if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
+                                       handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
+                               }
+                       })
+       } };
+       ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+               handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
        }
  }
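The uniqueness filter in the MANUALLY_REMOVING arm is a push-if-absent that always yields the index of the single stored copy; a generic sketch:

    // Replayed startup updates may already be in the in-flight list, so push
    // only when no equal update exists; either way, return the index of the
    // stored copy so it can be handed to the chain monitor by reference.
    fn push_dedup<T: PartialEq>(in_flight: &mut Vec<T>, update: T) -> usize {
        in_flight.iter().position(|upd| *upd == update).unwrap_or_else(|| {
            in_flight.push(update);
            in_flight.len() - 1
        })
    }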
  
@@@ -2309,9 -2343,8 +2343,8 @@@ wher
  
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update_opt.take() {
-                                               let update_id = monitor_update.update_id;
-                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                       peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
                                        }
  
                                        if chan_entry.get().is_shutdown() {
                                        }, onion_packet, None, &self.logger);
                                match break_chan_entry!(self, send_res, chan) {
                                        Some(monitor_update) => {
-                                               let update_id = monitor_update.update_id;
-                                               let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
-                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
-                                                       break Err(e);
-                                               }
-                                               if update_res == ChannelMonitorUpdateStatus::InProgress {
-                                                       // Note that MonitorUpdateInProgress here indicates (per function
-                                                       // docs) that we will resend the commitment update once monitor
-                                                       // updating completes. Therefore, we must return an error
-                                                       // indicating that it is unsafe to retry the payment wholesale,
-                                                       // which we do in the send_payment check for
-                                                       // MonitorUpdateInProgress, below.
-                                                       return Err(APIError::MonitorUpdateInProgress);
+                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+                                                       Err(e) => break Err(e),
+                                                       Ok(false) => {
+                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                               // docs) that we will resend the commitment update once monitor
+                                                               // updating completes. Therefore, we must return an error
+                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                               // which we do in the send_payment check for
+                                                               // MonitorUpdateInProgress, below.
+                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                       },
+                                                       Ok(true) => {},
                                                }
                                        },
                                        None => { },
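handle_new_monitor_update! now returns Result<bool, _>, and the send path above branches three ways on it; a sketch of the mapping (hypothetical names, the Err payload elided):

    enum SendOutcome { Sent, MonitorUpdateInProgress, ChannelClosed }

    fn interpret_update_result(res: Result<bool, ()>) -> SendOutcome {
        match res {
            Ok(true) => SendOutcome::Sent,
            // The update is still in flight: retrying the payment wholesale
            // is unsafe until the monitor update completes.
            Ok(false) => SendOutcome::MonitorUpdateInProgress,
            Err(()) => SendOutcome::ChannelClosed,
        }
    }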
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                },
                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
-                                       let update_res = self.chain_monitor.update_channel(funding_txo, &update);
+                                       let mut updated_chan = false;
                                        let res = {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
                                                                hash_map::Entry::Occupied(mut chan) => {
-                                                                       handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan)
+                                                                       updated_chan = true;
+                                                                       handle_new_monitor_update!(self, funding_txo, update.clone(),
+                                                                               peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
                                                                },
                                                                hash_map::Entry::Vacant(_) => Ok(()),
                                                        }
                                                } else { Ok(()) }
                                        };
+                                       if !updated_chan {
+                                               // TODO: Track this as in-flight even though the channel is closed.
+                                               let _ = self.chain_monitor.update_channel(funding_txo, &update);
+                                       }
                                        // TODO: If this channel has since closed, we're likely providing a payment
                                        // preimage update, which we must ensure is durable! We currently don't,
                                        // however, ensure that.
                                                                log_bytes!(chan_id), action);
                                                        peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                }
-                                               let update_id = monitor_update.update_id;
-                                               let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
-                                               let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+                                               let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
                                                        peer_state, per_peer_state, chan);
                                                if let Err(e) = res {
                                                        // TODO: This is a *critical* error - we probably updated the outbound edge
                                hash_map::Entry::Vacant(_) => return,
                        }
                };
-               log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
-                       highest_applied_update_id, channel.get().context.get_latest_monitor_update_id());
+               let remaining_in_flight =
+                       if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
+                               pending.retain(|upd| upd.update_id > highest_applied_update_id);
+                               pending.len()
+                       } else { 0 };
+               log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
+                       highest_applied_update_id, channel.get().context.get_latest_monitor_update_id(),
+                       remaining_in_flight);
                if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id {
                        return;
                }
-               handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
+               handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
        }
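Completion handling prunes the per-channel in-flight list by update_id; a sketch with a stand-in update type:

    struct InFlightUpdate { update_id: u64 }

    // Everything at or below highest_applied_update_id has been durably
    // persisted, so only strictly-newer updates remain in flight.
    fn prune_completed(pending: &mut Vec<InFlightUpdate>, highest_applied_update_id: u64) -> usize {
        pending.retain(|upd| upd.update_id > highest_applied_update_id);
        pending.len() // remaining in-flight count, as logged above
    }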
  
        /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
  
                                let chan = e.insert(chan);
-                               let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
-                                       per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+                               let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
+                                       per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
+                                       { peer_state.channel_by_id.remove(&new_channel_id) });
  
                                // Note that we reply with the new channel_id in error messages if we gave up on the
                                // channel, not the temporary_channel_id. This is compatible with ourselves, but the
                                if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
                                        res.0 = None;
                                }
-                               res
+                               res.map(|_| ())
                        }
                }
        }
                                let monitor = try_chan_entry!(self,
                                        chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
                                let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
-                               let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
+                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                        // it. Previously, full_stack_target found an (unreachable) panic when the
                                                shutdown_finish.0.take();
                                        }
                                }
-                               res
+                               res.map(|_| ())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
  
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update_opt {
-                                               let update_id = monitor_update.update_id;
-                                               let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
+                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                       peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
                                        }
                                        break Ok(());
                                },
                                let funding_txo = chan.get().context.get_funding_txo();
                                let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
                                if let Some(monitor_update) = monitor_update_opt {
-                                       let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
-                                       let update_id = monitor_update.update_id;
-                                       handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-                                               peer_state, per_peer_state, chan)
+                                       handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+                                               peer_state, per_peer_state, chan).map(|_| ())
                                } else { Ok(()) }
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                                        let funding_txo = chan.get().context.get_funding_txo();
                                        let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
                                        let res = if let Some(monitor_update) = monitor_update_opt {
-                                               let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
-                                               let update_id = monitor_update.update_id;
-                                               handle_new_monitor_update!(self, update_res, update_id,
-                                                       peer_state_lock, peer_state, per_peer_state, chan)
+                                               handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+                                                       peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
                                        } else { Ok(()) };
                                        (htlcs_to_fail, res)
                                },
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
  
-                                                       let update_res = self.chain_monitor.update_channel(
-                                                               funding_txo.expect("channel is live"), monitor_update);
-                                                       let update_id = monitor_update.update_id;
                                                        let channel_id: [u8; 32] = *channel_id;
-                                                       let res = handle_new_monitor_update!(self, update_res, update_id,
+                                                       let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
                                                                peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
                                                                peer_state.channel_by_id.remove(&channel_id));
                                                        if res.is_err() {
                inflight_htlcs
        }
  
 -      #[cfg(any(test, fuzzing, feature = "_test_utils"))]
 +      #[cfg(any(test, feature = "_test_utils"))]
        pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
                let events = core::cell::RefCell::new(Vec::new());
                let event_handler = |event: events::Event| events.borrow_mut().push(event);
                                        if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
                                                log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                        log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
-                                               let update_res = self.chain_monitor.update_channel(channel_funding_outpoint, monitor_update);
-                                               let update_id = monitor_update.update_id;
-                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id,
+                                               if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                        peer_state_lck, peer_state, per_peer_state, chan)
                                                {
                                                        errors.push((e, counterparty_node_id));
@@@ -7030,6 -7062,7 +7062,7 @@@ wher
                                                inbound_v1_channel_by_id: HashMap::new(),
                                                latest_features: init_msg.features.clone(),
                                                pending_msg_events: Vec::new(),
+                                               in_flight_monitor_updates: BTreeMap::new(),
                                                monitor_update_blocked_actions: BTreeMap::new(),
                                                actions_blocking_raa_monitor_updates: BTreeMap::new(),
                                                is_connected: true,
@@@ -7861,6 -7894,16 +7894,16 @@@ wher
                        pending_claiming_payments = None;
                }
  
+               let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
+               for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
+                       for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
+                               if !updates.is_empty() {
+                                       if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+                                       in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
+                               }
+                       }
+               }
                write_tlv_fields!(writer, {
                        (1, pending_outbound_payments_no_retry, required),
                        (2, pending_intercepted_htlcs, option),
                        (7, self.fake_scid_rand_bytes, required),
                        (8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
                        (9, htlc_purposes, vec_type),
+                       (10, in_flight_monitor_updates, option),
                        (11, self.probing_cookie_secret, required),
                        (13, htlc_onion_fields, optional_vec),
                });
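On the write side the outer Option stays None unless some peer has a non-empty in-flight list, so TLV type 10 is omitted entirely in the common case (and, as an even type under the usual even/odd TLV convention, readers that don't understand it must reject rather than silently ignore it). A generic sketch of the collection step:

    use std::collections::HashMap;

    fn collect_in_flight<'a, K: std::hash::Hash + Eq, V>(
        peers: impl Iterator<Item = (K, &'a Vec<V>)>,
    ) -> Option<HashMap<K, &'a Vec<V>>> {
        let mut out: Option<HashMap<K, &'a Vec<V>>> = None;
        for (key, updates) in peers {
            if !updates.is_empty() {
                // Allocate the map only on the first non-empty entry.
                out.get_or_insert_with(HashMap::new).insert(key, updates);
            }
        }
        out
    }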
@@@ -8087,7 -8131,7 +8131,7 @@@ wher
                let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
-               let mut pending_background_events = Vec::new();
+               let mut close_background_events = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
-                               if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
-                                       // If the channel is ahead of the monitor, return InvalidValue:
-                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
-                                               log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
-                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
-                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
-                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
-                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
-                                       return Err(DecodeError::InvalidValue);
-                               } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
+                               if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                                channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                                log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.context.force_shutdown(true);
                                        if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
-                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                               close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id, funding_txo, update
                                                });
                                        }
                                        log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
                                                log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
                                                monitor.get_latest_update_id());
-                                       channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               close_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
                }
  
                        claimable_htlcs_list.push((payment_hash, previous_hops));
                }
  
-               let peer_count: u64 = Readable::read(reader)?;
-               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
-               for _ in 0..peer_count {
-                       let peer_pubkey = Readable::read(reader)?;
-                       let peer_state = PeerState {
-                               channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
+               let peer_state_from_chans = |channel_by_id| {
+                       PeerState {
+                               channel_by_id,
                                outbound_v1_channel_by_id: HashMap::new(),
                                inbound_v1_channel_by_id: HashMap::new(),
-                               latest_features: Readable::read(reader)?,
+                               latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
+                               in_flight_monitor_updates: BTreeMap::new(),
                                monitor_update_blocked_actions: BTreeMap::new(),
                                actions_blocking_raa_monitor_updates: BTreeMap::new(),
                                is_connected: false,
-                       };
+                       }
+               };
+               let peer_count: u64 = Readable::read(reader)?;
+               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
+               for _ in 0..peer_count {
+                       let peer_pubkey = Readable::read(reader)?;
+                       let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+                       let mut peer_state = peer_state_from_chans(peer_chans);
+                       peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
                }
  
                        }
                }
  
-               for (node_id, peer_mtx) in per_peer_state.iter() {
-                       let peer_state = peer_mtx.lock().unwrap();
-                       for (_, chan) in peer_state.channel_by_id.iter() {
-                               for update in chan.uncompleted_unblocked_mon_updates() {
-                                       if let Some(funding_txo) = chan.context.get_funding_txo() {
-                                               log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
-                                                       update.update_id, log_bytes!(funding_txo.to_channel_id()));
-                                               pending_background_events.push(
-                                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                               counterparty_node_id: *node_id, funding_txo, update: update.clone(),
-                                                       });
-                                       } else {
-                                               return Err(DecodeError::InvalidValue);
-                                       }
-                               }
-                       }
-               }
                let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
  
                let mut pending_claiming_payments = Some(HashMap::new());
                let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
+               let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
                        (7, fake_scid_rand_bytes, option),
                        (8, events_override, option),
                        (9, claimable_htlc_purposes, vec_type),
+                       (10, in_flight_monitor_updates, option),
                        (11, probing_cookie_secret, option),
                        (13, claimable_htlc_onion_fields, optional_vec),
                });
                        retry_lock: Mutex::new(())
                };
  
+               // We have to replay (or skip, if they were completed after we wrote the `ChannelManager`)
+               // each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to
+               // check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we
+               // replayed, and, for each monitor update we replay, ensure there's a
+               // `ChannelMonitor` for it.
+               //
+               // In order to do so we first walk all of our live channels (so that we can check their
+               // state immediately after doing the update replays, when we have the `update_id`s
+               // available) and then walk any remaining in-flight updates.
+               //
+               // Because the actual handling of the in-flight updates is the same, it's macro'ized here:
+               let mut pending_background_events = Vec::new();
+               macro_rules! handle_in_flight_updates {
+                       ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
+                        $monitor: expr, $peer_state: expr, $channel_info_log: expr
+                       ) => { {
+                               let mut max_in_flight_update_id = 0;
+                               $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
+                               for update in $chan_in_flight_upds.iter() {
+                                       log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
+                                               update.update_id, $channel_info_log, log_bytes!($funding_txo.to_channel_id()));
+                                       max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
+                                       pending_background_events.push(
+                                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                       counterparty_node_id: $counterparty_node_id,
+                                                       funding_txo: $funding_txo,
+                                                       update: update.clone(),
+                                               });
+                               }
+                               if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
+                                       log_error!(args.logger, "Duplicate in-flight monitor update set for the same channel!");
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                               max_in_flight_update_id
+                       } }
+               }
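The macro's core is a filter-then-max over the serialized updates; a sketch with a stand-in type:

    struct ReplayUpdate { update_id: u64 }

    // Drop updates the ChannelMonitor already persisted; what survives gets
    // replayed, and the caller needs the highest update_id left in flight.
    fn filter_for_replay(in_flight: &mut Vec<ReplayUpdate>, monitor_latest_update_id: u64) -> u64 {
        in_flight.retain(|upd| upd.update_id > monitor_latest_update_id);
        in_flight.iter().map(|upd| upd.update_id).max().unwrap_or(0)
    }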
+               for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
+                       let mut peer_state_lock = peer_state_mtx.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
+                       for (_, chan) in peer_state.channel_by_id.iter() {
+                               // Channels that were persisted have to be funded, otherwise they should have been
+                               // discarded.
+                               let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                               let monitor = args.channel_monitors.get(&funding_txo)
+                                       .expect("We already checked for monitor presence when loading channels");
+                               let mut max_in_flight_update_id = monitor.get_latest_update_id();
+                               if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+                                       if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+                                               max_in_flight_update_id = cmp::max(max_in_flight_update_id,
+                                                       handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
+                                                               funding_txo, monitor, peer_state, ""));
+                                       }
+                               }
+                               if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
+                                       // If the channel is ahead of the monitor, return InvalidValue:
+                                       log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with in-flight updates through update_id {}",
+                                               log_bytes!(chan.context.channel_id()), monitor.get_latest_update_id(), max_in_flight_update_id);
+                                       log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                       }
+               }
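+               // Anything still left in in_flight_monitor_updates belongs to channels that
+               // were closed after the update was generated but before it completed.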
+               if let Some(in_flight_upds) = in_flight_monitor_updates {
+                       for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
+                               if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
+                                       // Now that we've removed all the in-flight monitor updates for channels that are
+                                       // still open, we need to replay any monitor updates that are for closed channels,
+                                       // creating the necessary peer_state entries as we go.
+                                       let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
+                                               Mutex::new(peer_state_from_chans(HashMap::new()))
+                                       });
+                                       let mut peer_state = peer_state_mutex.lock().unwrap();
+                                       handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
+                                               funding_txo, monitor, peer_state, "closed ");
+                               } else {
+                                       log_error!(args.logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
+                                       log_error!(args.logger, " The ChannelMonitor for channel {} is missing.",
+                                               log_bytes!(funding_txo.to_channel_id()));
+                                       log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+                                       log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+                                       log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+                                       log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                       }
+               }
+               // Note that we have to do the above replays before we push new monitor updates.
+               pending_background_events.append(&mut close_background_events);
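+               // (Replaying first ensures the regenerated, older updates are queued ahead of
+               // the close-channel monitor updates appended here.)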
                {
                        // If we're tracking pending payments, ensure we haven't lost any by looking at the
                        // ChannelMonitor data for any channels for which we do not have authoritative state