Remove an unused use (ChannelSigner)
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index bf1ac638c44645473402c848088fbd3727d389f0..e559ac3355007808a18abe58acb562cf8e2c112f 100644
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -41,7 +41,7 @@ use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
 
 use crate::io;
@@ -224,6 +224,7 @@ struct OutboundHTLCOutput {
        payment_hash: PaymentHash,
        state: OutboundHTLCState,
        source: HTLCSource,
+       skimmed_fee_msat: Option<u64>,
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
@@ -235,6 +236,8 @@ enum HTLCUpdateAwaitingACK {
                payment_hash: PaymentHash,
                source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket,
+               // The extra fee we're skimming off the top of this HTLC.
+               skimmed_fee_msat: Option<u64>,
        },
        ClaimHTLC {
                payment_preimage: PaymentPreimage,
@@ -485,13 +488,13 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
        /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
        /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
        /// previously placed in the holding cell (and has since been removed).
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-               monitor_update: &'a ChannelMonitorUpdate,
+               monitor_update: ChannelMonitorUpdate,
                /// The value of the HTLC which was claimed, in msat.
                htlc_value_msat: u64,
        },
@@ -524,6 +527,10 @@ pub(super) struct ReestablishResponses {
 }
 
 /// The return type of `force_shutdown`
+///
+/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// followed by a list of HTLCs to fail back, each given as a (source, payment hash,
+/// counterparty node id, channel id) tuple for this channel.
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
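As a rough sketch of how the tuple documented above is consumed (the variable and helper names here are illustrative assumptions, not code from this diff), a caller receiving a `ShutdownResult` might destructure it along these lines:

// Hedged sketch only: `shutdown_res` is assumed to be a ShutdownResult obtained from
// force_shutdown; `apply_monitor_update` and `fail_htlc_backwards` are placeholder
// helpers, not real APIs in this file.
let (monitor_update_opt, dropped_htlcs) = shutdown_res;
if let Some((counterparty_node_id, funding_txo, monitor_update)) = monitor_update_opt {
    apply_monitor_update(counterparty_node_id, funding_txo, monitor_update);
}
for (source, payment_hash, counterparty_node_id, channel_id) in dropped_htlcs {
    fail_htlc_backwards(source, payment_hash, counterparty_node_id, channel_id);
}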
@@ -583,21 +590,41 @@ pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 
+/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
+/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
+/// exceeding this age limit will be force-closed and purged from memory.
+pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
+
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
-       /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
-       /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
-       /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
-       ///
-       /// [`ChannelManager`]: super::channelmanager::ChannelManager
-       blocked: bool,
 }
 
 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
-       (2, blocked, required),
 });
 
+/// Contains all state common to unfunded inbound/outbound channels.
+pub(super) struct UnfundedChannelContext {
+       /// A counter tracking how many ticks have elapsed since this unfunded channel was
+       /// created. If the peer has yet to respond by the time this counter reaches
+       /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
+       ///
+       /// This is so that we don't keep channels around that haven't progressed to a funded state
+       /// in a timely manner.
+       unfunded_channel_age_ticks: usize,
+}
+
+impl UnfundedChannelContext {
+       /// Determines whether we should force-close and purge this unfunded channel from memory due to it
+       /// having reached the unfunded channel age limit.
+       ///
+       /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+       pub fn should_expire_unfunded_channel(&mut self) -> bool {
+               self.unfunded_channel_age_ticks += 1;
+               self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
+       }
+}
+
 /// Contains everything about the channel including state, and various flags.
 pub(super) struct ChannelContext<Signer: ChannelSigner> {
        config: LegacyChannelConfig,
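The tick counter introduced above is meant to be driven from the manager's timer. A minimal, hedged sketch of that call pattern (the helper below and its surroundings are assumptions for illustration, not code from this diff):

// Hypothetical timer-tick helper: bump each unfunded channel's age counter via
// should_expire_unfunded_channel() and report how many have hit
// UNFUNDED_CHANNEL_AGE_LIMIT_TICKS and should be force-closed.
fn count_expired_unfunded_channels(contexts: &mut [UnfundedChannelContext]) -> usize {
    let mut expired = 0;
    for ctx in contexts.iter_mut() {
        // Note: the call both increments the counter and checks the limit.
        if ctx.should_expire_unfunded_channel() {
            expired += 1;
        }
    }
    expired
}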
@@ -866,11 +893,9 @@ pub(super) struct ChannelContext<Signer: ChannelSigner> {
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
 
-       /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
-       /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
-       /// completes we still need to be able to complete the persistence. Thus, we have to keep a
-       /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
-       pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+       /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+       /// store it here and only release it to the `ChannelManager` once it asks for it.
+       blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
 }
 
 impl<Signer: ChannelSigner> ChannelContext<Signer> {
@@ -909,6 +934,34 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
        }
 
+       /// Returns the [`ChannelShutdownState`] of the channel, reflecting which stage of shutdown it is in.
+       pub fn shutdown_state(&self) -> ChannelShutdownState {
+               if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+                       return ChannelShutdownState::ShutdownComplete;
+               }
+               if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+                       return ChannelShutdownState::ShutdownInitiated;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+                       return ChannelShutdownState::ResolvingHTLCs;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+                       return ChannelShutdownState::NegotiatingClosingFee;
+               }
+               return ChannelShutdownState::NotShuttingDown;
+       }
+
+       fn closing_negotiation_ready(&self) -> bool {
+               self.pending_inbound_htlcs.is_empty() &&
+               self.pending_outbound_htlcs.is_empty() &&
+               self.pending_update_fee.is_none() &&
+               self.channel_state &
+               (BOTH_SIDES_SHUTDOWN_MASK |
+                       ChannelState::AwaitingRemoteRevoke as u32 |
+                       ChannelState::PeerDisconnected as u32 |
+                       ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+       }
+
        /// Returns true if this channel is currently available for use. This is a superset of
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
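For readers tracking the new accessor, the mapping from state flags to `ChannelShutdownState` can be exercised roughly as follows (a hedged usage sketch; the log strings are illustrative only):

// Illustrative consumer of shutdown_state(): react to each stage of shutdown.
match context.shutdown_state() {
    ChannelShutdownState::NotShuttingDown => log_trace!(logger, "channel fully operational"),
    ChannelShutdownState::ShutdownInitiated => log_trace!(logger, "we sent shutdown, peer has not"),
    ChannelShutdownState::ResolvingHTLCs => log_trace!(logger, "both sides shut down, HTLCs still pending"),
    ChannelShutdownState::NegotiatingClosingFee => log_trace!(logger, "negotiating the closing fee"),
    ChannelShutdownState::ShutdownComplete => log_trace!(logger, "channel is closed"),
}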
@@ -944,9 +997,9 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                &self.channel_type
        }
 
-       /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
-       /// is_usable() returns true).
-       /// Allowed in any state (including after shutdown)
+       /// Gets the channel's `short_channel_id`.
+       ///
+       /// Will return `None` if the channel hasn't been confirmed yet.
        pub fn get_short_channel_id(&self) -> Option<u64> {
                self.short_channel_id
        }
@@ -969,7 +1022,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        }
 
        /// Returns the funding_txo we either got from our peer, or were given by
-       /// get_outbound_funding_created.
+       /// get_funding_created.
        pub fn get_funding_txo(&self) -> Option<OutPoint> {
                self.channel_transaction_parameters.funding_outpoint
        }
@@ -1065,8 +1118,18 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
        }
 
-       pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
-               self.config.options.max_dust_htlc_exposure_msat
+       pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+               fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+       where F::Target: FeeEstimator
+       {
+               match self.config.options.max_dust_htlc_exposure {
+                       MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
+                               let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+                                       ConfirmationTarget::HighPriority);
+                               feerate_per_kw as u64 * multiplier
+                       },
+                       MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+               }
        }
 
        /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
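To make the `FeeRateMultiplier` arithmetic above concrete, here is a small self-contained sketch of the same computation with assumed numbers (neither value is a default taken from this diff):

// With an assumed HighPriority estimate of 2_500 sat per 1000 weight and a
// FeeRateMultiplier of 5_000, the dust-exposure cap works out to 12_500_000 msat.
let feerate_per_kw: u64 = 2_500;
let multiplier: u64 = 5_000;
let max_dust_htlc_exposure_msat = feerate_per_kw * multiplier;
assert_eq!(max_dust_htlc_exposure_msat, 12_500_000);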
@@ -1404,7 +1467,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        #[inline]
        /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
        /// will sign and send to our counterparty.
-       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+       /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
        fn build_remote_transaction_keys(&self) -> TxCreationKeys {
                //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
                //may see payments to it!
@@ -1539,7 +1602,10 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        /// Doesn't bother handling the
        /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
        /// corner case properly.
-       pub fn get_available_balances(&self) -> AvailableBalances {
+       pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+       -> AvailableBalances
+       where F::Target: FeeEstimator
+       {
                let context = &self;
                // Note that we have to handle overflow due to the above case.
                let inbound_stats = context.get_inbound_pending_htlc_stats(None);
@@ -1621,6 +1687,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                // send above the dust limit (as the router can always overpay to meet the dust limit).
                let mut remaining_msat_below_dust_exposure_limit = None;
                let mut dust_exposure_dust_limit_msat = 0;
+               let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
 
                let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
@@ -1630,17 +1697,17 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                         context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
                let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
                        remaining_msat_below_dust_exposure_limit =
-                               Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                               Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
 
                let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
                        remaining_msat_below_dust_exposure_limit = Some(cmp::min(
                                remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-                               context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                               max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
                }
 
@@ -1986,7 +2053,7 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
 
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
+// calling channel_id() before we're set up or things like get_funding_signed on an
 // inbound channel.
 //
 // Holder designates channel data owned for the benefit of the user client.
@@ -2005,20 +2072,35 @@ struct CommitmentTxInfoCached {
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
-       fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
-               feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
-               -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
+       fn check_remote_fee<F: Deref, L: Deref>(
+               channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
+               feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
+       ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
        {
                // We only bound the fee updates on the upper side to prevent completely absurd feerates,
                // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
                // We generally don't care too much if they set the feerate to something very high, but it
-               // could result in the channel being useless due to everything being dust.
-               let upper_limit = cmp::max(250 * 25,
-                       fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
-               if feerate_per_kw as u64 > upper_limit {
-                       return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
-               }
-               let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+               // could result in the channel being useless due to everything being dust. This doesn't
+               // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
+               // zero fee, so their fee is no longer considered to determine dust limits.
+               if !channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       let upper_limit = cmp::max(250 * 25,
+                               fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+                       if feerate_per_kw as u64 > upper_limit {
+                               return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
+                       }
+               }
+
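A quick numeric illustration of the retained upper bound for non-anchor channels (the feerate below is an assumed estimator output, not a value from this diff):

// upper_limit = max(250 * 25, HighPriority estimate * 10)
let high_priority_feerate_per_kw: u64 = 5_000; // assumed bounded_sat_per_1000_weight() result
let upper_limit = core::cmp::max(250 * 25, high_priority_feerate_per_kw * 10);
assert_eq!(upper_limit, 50_000); // 250 * 25 = 6_250 only dominates for very low estimates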
+               // We can afford to use a lower bound with anchors than previously since we can now bump
+               // fees when broadcasting our commitment. However, we must still make sure we meet the
+               // minimum mempool feerate, until package relay is deployed, such that we can ensure the
+               // commitment transaction propagates throughout node mempools on its own.
+               let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       ConfirmationTarget::MempoolMinimum
+               } else {
+                       ConfirmationTarget::Background
+               };
+               let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
                // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
                // occasional issues with feerate disagreements between an initiator that wants a feerate
                // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
@@ -2256,7 +2338,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
-               let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+               let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
                match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
                        UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
                                // Even if we aren't supposed to let new monitor updates with commitment state
@@ -2264,43 +2346,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // matter what. Sadly, to push a new monitor update which flies before others
                                // already queued, we have to insert it into the pending queue and update the
                                // update_ids of all the following monitors.
-                               let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+                               if release_cs_monitor && msg.is_some() {
                                        let mut additional_update = self.build_commitment_no_status_check(logger);
                                        // build_commitment_no_status_check may bump latest_monitor_id but we want them
                                        // to be strictly increasing by one, so decrement it here.
                                        self.context.latest_monitor_update_id = monitor_update.update_id;
                                        monitor_update.updates.append(&mut additional_update.updates);
-                                       self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       self.context.pending_monitor_updates.len() - 1
                                } else {
-                                       let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
-                                               .unwrap_or(self.context.pending_monitor_updates.len());
-                                       let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+                                       let new_mon_id = self.context.blocked_monitor_updates.get(0)
                                                .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
                                        monitor_update.update_id = new_mon_id;
-                                       self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+                                       for held_update in self.context.blocked_monitor_updates.iter_mut() {
                                                held_update.update.update_id += 1;
                                        }
                                        if msg.is_some() {
                                                debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
                                                let update = self.build_commitment_no_status_check(logger);
-                                               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                                       update, blocked: true,
+                                               self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                                                       update,
                                                });
                                        }
-                                       insert_pos
-                               };
-                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
-                               UpdateFulfillCommitFetch::NewClaim {
-                                       monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
-                                               .expect("We just pushed the monitor update").update,
-                                       htlc_value_msat,
                                }
+
+                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+                               UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
                        },
                        UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
                }
@@ -2571,8 +2640,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
 
-       pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
-       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+       pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+               &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+               create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+               FE::Target: FeeEstimator, L::Target: Logger,
+       {
                // We can't accept HTLCs sent after we've sent a shutdown.
                let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
                if local_sent_shutdown {
@@ -2625,6 +2699,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
                let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
@@ -2635,9 +2710,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
                        let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+                       if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+                                       on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
@@ -2645,9 +2720,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
                        let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+                       if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+                                       on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
                                pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
@@ -2790,7 +2865,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(())
        }
 
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
                where L::Target: Logger
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3014,16 +3089,24 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// Public version of the below, checking relevant preconditions first.
        /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
        /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
                   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-                       self.free_holding_cell_htlcs(logger)
+                       self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
 
        /// Frees any pending commitment updates in the holding cell, generating the relevant messages
        /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+       fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
                if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
                        log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
@@ -3047,8 +3130,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // handling this case better and maybe fulfilling some of the HTLCs while attempting
                                // to rebalance channels.
                                match &htlc_update {
-                                       &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
-                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+                                       &HTLCUpdateAwaitingACK::AddHTLC {
+                                               amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+                                               skimmed_fee_msat, ..
+                                       } => {
+                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+                                               {
                                                        Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
                                                        Err(e) => {
                                                                match e {
@@ -3107,7 +3195,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                return (None, htlcs_to_fail);
                        }
                        let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
-                               self.send_update_fee(feerate, false, logger)
+                               self.send_update_fee(feerate, false, fee_estimator, logger)
                        } else {
                                None
                        };
@@ -3134,8 +3222,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
-               where L::Target: Logger,
+       pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger,
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
                        return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
@@ -3335,9 +3425,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
                }
 
-               match self.free_holding_cell_htlcs(logger) {
-                       (Some(_), htlcs_to_fail) => {
-                               let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+               match self.free_holding_cell_htlcs(fee_estimator, logger) {
+                       (Some(mut additional_update), htlcs_to_fail) => {
                                // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.context.latest_monitor_update_id = monitor_update.update_id;
@@ -3371,8 +3460,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// Queues up an outbound update fee by placing it in the holding cell. You should call
        /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
        /// commitment update.
-       pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
-               let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+       pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
                assert!(msg_opt.is_none(), "We forced holding cell?");
        }
 
@@ -3383,7 +3475,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        ///
        /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
        /// [`Channel`] if `force_holding_cell` is false.
-       fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+       fn send_update_fee<F: Deref, L: Deref>(
+               &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Option<msgs::UpdateFee>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if !self.context.is_outbound() {
                        panic!("Cannot send fee from inbound channel");
                }
@@ -3410,11 +3507,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
                let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
-               if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+               if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
@@ -3553,12 +3651,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
-               let mut found_blocked = false;
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
-                       if upd.blocked { found_blocked = true; }
-                       upd.blocked
-               });
 
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -3638,7 +3730,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
-               Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
+               Channel::<Signer>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
                let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
 
                self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
@@ -3651,11 +3743,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
                        let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                        let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-                       if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+                       if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
                                        msg.feerate_per_kw, holder_tx_dust_exposure)));
                        }
-                       if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
                                        msg.feerate_per_kw, counterparty_tx_dust_exposure)));
                        }
@@ -3690,6 +3783,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        payment_hash: htlc.payment_hash,
                                        cltv_expiry: htlc.cltv_expiry,
                                        onion_routing_packet: (**onion_packet).clone(),
+                                       skimmed_fee_msat: htlc.skimmed_fee_msat,
                                });
                        }
                }
@@ -3976,12 +4070,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// this point if we're the funder we should send the initial closing_signed, and in any case
        /// shutdown should complete within a reasonable timeframe.
        fn closing_negotiation_ready(&self) -> bool {
-               self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
-                       self.context.channel_state &
-                               (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
-                                ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
-                               == BOTH_SIDES_SHUTDOWN_MASK &&
-                       self.context.pending_update_fee.is_none()
+               self.context.closing_negotiation_ready()
        }
 
        /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
@@ -4061,7 +4150,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn shutdown<SP: Deref>(
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-       ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+       ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        where SP::Target: SignerProvider
        {
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4127,9 +4216,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
+                       self.push_ret_blockable_mon_update(monitor_update)
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
@@ -4419,64 +4506,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
        }
 
-       pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
-               if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
-               self.context.pending_monitor_updates[0].update.update_id - 1
+       /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+       pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+               if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+               self.context.blocked_monitor_updates[0].update.update_id - 1
        }
 
        /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
        /// further blocked monitor update exists after the next.
-       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
-               for i in 0..self.context.pending_monitor_updates.len() {
-                       if self.context.pending_monitor_updates[i].blocked {
-                               self.context.pending_monitor_updates[i].blocked = false;
-                               return Some((&self.context.pending_monitor_updates[i].update,
-                                       self.context.pending_monitor_updates.len() > i + 1));
-                       }
-               }
-               None
-       }
-
-       /// Pushes a new monitor update into our monitor update queue, returning whether it should be
-       /// immediately given to the user for persisting or if it should be held as blocked.
-       fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
-               let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
-               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                       update, blocked: !release_monitor
-               });
-               release_monitor
+       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+               if self.context.blocked_monitor_updates.is_empty() { return None; }
+               Some((self.context.blocked_monitor_updates.remove(0).update,
+                       !self.context.blocked_monitor_updates.is_empty()))
        }
 
-       /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
-       /// it should be immediately given to the user for persisting or `None` if it should be held as
-       /// blocked.
+       /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+       /// immediately given to the user for persisting or `None` if it should be held as blocked.
        fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
-       -> Option<&ChannelMonitorUpdate> {
-               let release_monitor = self.push_blockable_mon_update(update);
-               if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
-       }
-
-       pub fn no_monitor_updates_pending(&self) -> bool {
-               self.context.pending_monitor_updates.is_empty()
-       }
-
-       pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if upd.update.update_id <= update_id {
-                               assert!(!upd.blocked, "Completed update must have flown");
-                               false
-                       } else { true }
-               });
-       }
-
-       pub fn complete_one_mon_update(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+       -> Option<ChannelMonitorUpdate> {
+               let release_monitor = self.context.blocked_monitor_updates.is_empty();
+               if !release_monitor {
+                       self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                               update,
+                       });
+                       None
+               } else {
+                       Some(update)
+               }
        }
 
-       /// Returns an iterator over all unblocked monitor updates which have not yet completed.
-       pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
-               self.context.pending_monitor_updates.iter()
-                       .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
+       pub fn blocked_monitor_updates_pending(&self) -> usize {
+               self.context.blocked_monitor_updates.len()
        }
 
        /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
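The net effect of the queue changes above: the first update in flight is handed out immediately, while later ones wait in `blocked_monitor_updates` until the manager drains them in order. A hedged sketch of that drain loop (the surrounding manager plumbing and the `apply_monitor_update` helper are assumptions, not code from this diff):

// Hypothetical drain loop over a Channel's blocked monitor updates.
loop {
    match channel.unblock_next_blocked_monitor_update() {
        Some((update, more_blocked)) => {
            // Hand the released ChannelMonitorUpdate to persistence.
            apply_monitor_update(update);
            if !more_blocked { break; }
        },
        None => break,
    }
}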
@@ -4827,6 +4887,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
                }
 
+               let short_channel_id = self.context.get_short_channel_id()
+                       .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
                let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
                        .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
                let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
@@ -4835,7 +4897,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                let msg = msgs::UnsignedChannelAnnouncement {
                        features: channelmanager::provided_channel_features(&user_config),
                        chain_hash,
-                       short_channel_id: self.context.get_short_channel_id().unwrap(),
+                       short_channel_id,
                        node_id_1: if were_node_one { node_id } else { counterparty_node_id },
                        node_id_2: if were_node_one { counterparty_node_id } else { node_id },
                        bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
@@ -4893,11 +4955,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        },
                        Ok(v) => v
                };
+               let short_channel_id = match self.context.get_short_channel_id() {
+                       Some(scid) => scid,
+                       None => return None,
+               };
+
                self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
 
                Some(msgs::AnnouncementSignatures {
                        channel_id: self.context.channel_id(),
-                       short_channel_id: self.context.get_short_channel_id().unwrap(),
+                       short_channel_id,
                        node_signature: our_node_sig,
                        bitcoin_signature: our_bitcoin_sig,
                })
@@ -5037,11 +5104,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// commitment update.
        ///
        /// `Err`s will only be [`ChannelError::Ignore`].
-       pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, logger: &L)
-       -> Result<(), ChannelError> where L::Target: Logger {
+       pub fn queue_add_htlc<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+               onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                self
-                       .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
+                       .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
+                               skimmed_fee_msat, fee_estimator, logger)
                        .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
                        .map_err(|err| {
                                if let ChannelError::Ignore(_) = err { /* fine */ }
@@ -5066,9 +5138,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// on this [`Channel`] if `force_holding_cell` is false.
        ///
        /// `Err`s will only be [`ChannelError::Ignore`].
-       fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
-       -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+       fn send_htlc<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+               onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
+               skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
                        return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
                }
@@ -5081,7 +5157,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
                }
 
-               let available_balances = self.context.get_available_balances();
+               let available_balances = self.context.get_available_balances(fee_estimator);
                if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
                        return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
                                available_balances.next_outbound_htlc_minimum_msat)));
@@ -5120,6 +5196,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                cltv_expiry,
                                source,
                                onion_routing_packet,
+                               skimmed_fee_msat,
                        });
                        return Ok(None);
                }
@@ -5131,6 +5208,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        cltv_expiry,
                        state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
                        source,
+                       skimmed_fee_msat,
                });
 
                let res = msgs::UpdateAddHTLC {
@@ -5140,6 +5218,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        payment_hash,
                        cltv_expiry,
                        onion_routing_packet,
+                       skimmed_fee_msat,
                };
                self.context.next_holder_htlc_id += 1;
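For intuition on the new field: `skimmed_fee_msat` is the extra fee taken on top of the advertised routing fee when forwarding, so the amount offered to the next hop shrinks by both. A back-of-the-envelope sketch with made-up numbers (plain Rust, not LDK types; the function name is illustrative):

fn forwarded_amount_msat(inbound_msat: u64, routing_fee_msat: u64, skimmed_fee_msat: Option<u64>) -> u64 {
    // None means no extra fee was skimmed, which is the common case.
    inbound_msat - routing_fee_msat - skimmed_fee_msat.unwrap_or(0)
}

fn main() {
    // Forward 100_000 msat, charge the advertised 1_000 msat routing fee, and
    // skim an extra 2_000 msat off the top: the next hop is offered 97_000 msat.
    assert_eq!(forwarded_amount_msat(100_000, 1_000, Some(2_000)), 97_000);
    // Without a skim only the routing fee is deducted.
    assert_eq!(forwarded_amount_msat(100_000, 1_000, None), 99_000);
}
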
 
@@ -5278,8 +5357,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        ///
        /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
        /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
-       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
-               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+       pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+               source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
+                       onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
                if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
                match send_res? {
                        Some(_) => {
@@ -5311,7 +5397,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// [`ChannelMonitorUpdate`] will be returned).
        pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
                target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
        where SP::Target: SignerProvider {
                for htlc in self.context.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -5382,9 +5468,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
+                       self.push_ret_blockable_mon_update(monitor_update)
                } else { None };
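Aside on the `&ChannelMonitorUpdate` to `ChannelMonitorUpdate` change here and in `send_htlc_and_commit`: returning a reference into the channel's own pending-update list keeps the channel borrowed while the caller applies the update, so `push_ret_blockable_mon_update` hands back an owned update instead, either released immediately or parked if something earlier still blocks it. Roughly, as a toy (not LDK's types or exact gating logic):

struct Update { update_id: u64 }

struct Chan {
    // Updates that cannot be applied yet because something earlier must finish.
    blocked_monitor_updates: Vec<Update>,
}

impl Chan {
    fn push_ret_blockable_mon_update(&mut self, upd: Update) -> Option<Update> {
        if self.blocked_monitor_updates.is_empty() {
            // Nothing is blocking it: give the caller an owned update to apply now.
            Some(upd)
        } else {
            // Park it behind the earlier blocked updates and return nothing.
            self.blocked_monitor_updates.push(upd);
            None
        }
    }
}

fn main() {
    let mut chan = Chan { blocked_monitor_updates: Vec::new() };
    assert!(chan.push_ret_blockable_mon_update(Update { update_id: 1 }).is_some());
}
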
                let shutdown = msgs::Shutdown {
                        channel_id: self.context.channel_id,
@@ -5427,6 +5511,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
 pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
        pub context: ChannelContext<Signer>,
+       pub unfunded_context: UnfundedChannelContext,
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
@@ -5467,10 +5552,15 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                let channel_type = Self::get_initial_channel_type(&config, their_features);
                debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
 
-               let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+               let commitment_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       ConfirmationTarget::MempoolMinimum
+               } else {
+                       ConfirmationTarget::Normal
+               };
+               let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
 
                let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
-               let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+               let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
                if value_to_self_msat < commitment_tx_fee {
                        return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
                }
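The confirmation-target switch above leans on the `FeeEstimator` trait: an anchor-outputs commitment can be CPFP-bumped later, so its pre-signed feerate only needs to get the transaction into the mempool, while a non-anchor commitment still has to pay for its own confirmation. A minimal sketch of an estimator a node might plug in (the numbers are made up; only the trait and enum names are LDK's):

use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

struct StaticFees;

impl FeeEstimator for StaticFees {
    fn get_est_sat_per_1000_weight(&self, target: ConfirmationTarget) -> u32 {
        match target {
            // Just enough to propagate; anchors let us bump the package later.
            ConfirmationTarget::MempoolMinimum => 253,
            // Low-urgency estimate used for background tasks.
            ConfirmationTarget::Background => 1_000,
            // A feerate we expect to confirm in a reasonable number of blocks.
            ConfirmationTarget::Normal => 2_000,
            // Anything more urgent (e.g. HighPriority) gets a higher rate here.
            _ => 5_000,
        }
    }
}

fn main() {
    let fees = StaticFees;
    assert!(fees.get_est_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum)
        <= fees.get_est_sat_per_1000_weight(ConfirmationTarget::Normal));
}
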
@@ -5564,7 +5654,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                                short_channel_id: None,
                                channel_creation_height: current_chain_height,
 
-                               feerate_per_kw: feerate,
+                               feerate_per_kw: commitment_feerate,
                                counterparty_dust_limit_satoshis: 0,
                                holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                                counterparty_max_htlc_value_in_flight_msat: 0,
@@ -5622,13 +5712,14 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                                channel_type,
                                channel_keys_id,
 
-                               pending_monitor_updates: Vec::new(),
-                       }
+                               blocked_monitor_updates: Vec::new(),
+                       },
+                       unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                })
        }
 
-       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
-       fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+       /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
+       fn get_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
                let counterparty_keys = self.context.build_remote_transaction_keys();
                let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
                Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
@@ -5642,7 +5733,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_outbound_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+       pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
        -> Result<(Channel<Signer>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
                if !self.context.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
@@ -5659,7 +5750,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
                self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-               let signature = match self.get_outbound_funding_created_signature(logger) {
+               let signature = match self.get_funding_created_signature(logger) {
                        Ok(res) => res,
                        Err(e) => {
                                log_error!(logger, "Got bad signatures: {:?}!", e);
@@ -5707,12 +5798,9 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
                // set it now. If they don't understand it, we'll fall back to our default of
                // `only_static_remotekey`.
-               #[cfg(anchors)]
-               { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
-                       if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
-                               their_features.supports_anchors_zero_fee_htlc_tx() {
-                               ret.set_anchors_zero_fee_htlc_tx_required();
-                       }
+               if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+                       their_features.supports_anchors_zero_fee_htlc_tx() {
+                       ret.set_anchors_zero_fee_htlc_tx_required();
                }
 
                ret
@@ -5721,7 +5809,12 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
        /// If we receive an error message, it may only be a rejection of the channel type we tried,
        /// not of our ability to open any channel at all. Thus, on error, we should first call this
        /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
-       pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
+       pub(crate) fn maybe_handle_error_without_close<F: Deref>(
+               &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator<F>
+       ) -> Result<msgs::OpenChannel, ()>
+       where
+               F::Target: FeeEstimator
+       {
                if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
                if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
                        // We've exhausted our options
@@ -5738,6 +5831,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
                // whatever reason.
                if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
                        self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
+                       self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                        assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
                } else if self.context.channel_type.supports_scid_privacy() {
                        self.context.channel_type.clear_scid_privacy();
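The retry ladder here is worth spelling out: on a rejection we strip the most specific feature we proposed, fix up anything that depended on it (dropping anchors means the commitment feerate must go back to a confirmation-targeting estimate), and re-send `open_channel`; once only `only_static_remote_key` is left, a further error is final. A toy sketch of that shape (not LDK's `ChannelTypeFeatures`; field and function names are illustrative):

#[derive(Default)]
struct Proposal {
    anchors_zero_fee_htlc_tx: bool,
    scid_privacy: bool,
    feerate_per_kw: u32,
}

/// Returns true if there is a downgraded proposal worth retrying with.
fn maybe_downgrade(p: &mut Proposal, normal_feerate: u32) -> bool {
    if p.anchors_zero_fee_htlc_tx {
        // Without anchors the commitment can no longer be CPFP-bumped, so it
        // has to pay for its own confirmation again.
        p.anchors_zero_fee_htlc_tx = false;
        p.feerate_per_kw = normal_feerate;
        true
    } else if p.scid_privacy {
        p.scid_privacy = false;
        true
    } else {
        // Only the baseline type remains; there is nothing left to strip.
        false
    }
}

fn main() {
    let mut p = Proposal { anchors_zero_fee_htlc_tx: true, scid_privacy: false, feerate_per_kw: 253 };
    assert!(maybe_downgrade(&mut p, 2_000));
    assert_eq!(p.feerate_per_kw, 2_000);
    assert!(!maybe_downgrade(&mut p, 2_000));
}
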
@@ -5925,6 +6019,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
 pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
        pub context: ChannelContext<Signer>,
+       pub unfunded_context: UnfundedChannelContext,
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
@@ -6007,7 +6102,7 @@ impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
                if msg.htlc_minimum_msat >= full_channel_value_msat {
                        return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
-               Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
+               Channel::<Signer>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
 
                let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
                if msg.to_self_delay > max_counterparty_selected_contest_delay {
@@ -6251,8 +6346,9 @@ impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
                                channel_type,
                                channel_keys_id,
 
-                               pending_monitor_updates: Vec::new(),
-                       }
+                               blocked_monitor_updates: Vec::new(),
+                       },
+                       unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
 
                Ok(chan)
@@ -6602,9 +6698,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                }
 
                let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
+               let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
 
                (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
-               for htlc in self.context.pending_outbound_htlcs.iter() {
+               for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
                        htlc.htlc_id.write(writer)?;
                        htlc.amount_msat.write(writer)?;
                        htlc.cltv_expiry.write(writer)?;
@@ -6640,18 +6737,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                                        reason.write(writer)?;
                                }
                        }
+                       if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
+                               if pending_outbound_skimmed_fees.is_empty() {
+                                       for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
+                               }
+                               pending_outbound_skimmed_fees.push(Some(skimmed_fee));
+                       } else if !pending_outbound_skimmed_fees.is_empty() {
+                               pending_outbound_skimmed_fees.push(None);
+                       }
                }
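The backfilling above (and the identical trick for the holding-cell adds just below) keeps the new list aligned with the HTLC list while writing nothing per-HTLC for channels that never skimmed a fee: the `Vec<Option<u64>>` is only materialized once the first `Some` shows up, padded with `None` for every earlier entry. A standalone sketch of the same logic (plain Rust, illustrative names):

fn collect_sparse(fees: &[Option<u64>]) -> Vec<Option<u64>> {
    let mut out: Vec<Option<u64>> = Vec::new();
    for (idx, fee) in fees.iter().enumerate() {
        if let Some(fee) = fee {
            if out.is_empty() {
                // First Some we have seen: pad so indices keep lining up.
                for _ in 0..idx { out.push(None); }
            }
            out.push(Some(*fee));
        } else if !out.is_empty() {
            out.push(None);
        }
    }
    out
}

fn main() {
    // Nothing skimmed anywhere: the list stays empty.
    assert!(collect_sparse(&[None, None]).is_empty());
    // One skimmed fee: the result has one entry per HTLC, in order.
    assert_eq!(collect_sparse(&[None, Some(7), None]), vec![None, Some(7), None]);
}
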
 
+               let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
                (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
-               for update in self.context.holding_cell_htlc_updates.iter() {
+               for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
                        match update {
-                               &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
+                               &HTLCUpdateAwaitingACK::AddHTLC {
+                                       ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+                                       skimmed_fee_msat,
+                               } => {
                                        0u8.write(writer)?;
                                        amount_msat.write(writer)?;
                                        cltv_expiry.write(writer)?;
                                        payment_hash.write(writer)?;
                                        source.write(writer)?;
                                        onion_routing_packet.write(writer)?;
+
+                                       if let Some(skimmed_fee) = skimmed_fee_msat {
+                                               if holding_cell_skimmed_fees.is_empty() {
+                                                       for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
+                                               }
+                                               holding_cell_skimmed_fees.push(Some(skimmed_fee));
+                                       } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
                                },
                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
                                        1u8.write(writer)?;
@@ -6804,10 +6920,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                        (5, self.context.config, required),
                        (6, serialized_holder_htlc_max_in_flight, option),
                        (7, self.context.shutdown_scriptpubkey, option),
+                       (8, self.context.blocked_monitor_updates, optional_vec),
                        (9, self.context.target_closing_feerate_sats_per_kw, option),
-                       (11, self.context.monitor_pending_finalized_fulfills, vec_type),
+                       (11, self.context.monitor_pending_finalized_fulfills, required_vec),
                        (13, self.context.channel_creation_height, required),
-                       (15, preimages, vec_type),
+                       (15, preimages, required_vec),
                        (17, self.context.announcement_sigs_state, required),
                        (19, self.context.latest_inbound_scid_alias, option),
                        (21, self.context.outbound_scid_alias, required),
@@ -6817,7 +6934,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                        (28, holder_max_accepted_htlcs, option),
                        (29, self.context.temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, self.context.pending_monitor_updates, vec_type),
+                       (35, pending_outbound_skimmed_fees, optional_vec),
+                       (37, holding_cell_skimmed_fees, optional_vec),
                });
 
                Ok(())
@@ -6928,6 +7046,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        },
                                        _ => return Err(DecodeError::InvalidValue),
                                },
+                               skimmed_fee_msat: None,
                        });
                }
 
@@ -6941,6 +7060,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        payment_hash: Readable::read(reader)?,
                                        source: Readable::read(reader)?,
                                        onion_routing_packet: Readable::read(reader)?,
+                                       skimmed_fee_msat: None,
                                },
                                1 => HTLCUpdateAwaitingACK::ClaimHTLC {
                                        payment_preimage: Readable::read(reader)?,
@@ -7094,7 +7214,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut temporary_channel_id: Option<[u8; 32]> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
 
-               let mut pending_monitor_updates = Some(Vec::new());
+               let mut blocked_monitor_updates = Some(Vec::new());
+
+               let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+               let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
@@ -7105,10 +7228,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
                        (6, holder_max_htlc_value_in_flight_msat, option),
                        (7, shutdown_scriptpubkey, option),
+                       (8, blocked_monitor_updates, optional_vec),
                        (9, target_closing_feerate_sats_per_kw, option),
-                       (11, monitor_pending_finalized_fulfills, vec_type),
+                       (11, monitor_pending_finalized_fulfills, optional_vec),
                        (13, channel_creation_height, option),
-                       (15, preimages_opt, vec_type),
+                       (15, preimages_opt, optional_vec),
                        (17, announcement_sigs_state, option),
                        (19, latest_inbound_scid_alias, option),
                        (21, outbound_scid_alias, option),
@@ -7118,7 +7242,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (28, holder_max_accepted_htlcs, option),
                        (29, temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, pending_monitor_updates, vec_type),
+                       (35, pending_outbound_skimmed_fees_opt, optional_vec),
+                       (37, holding_cell_skimmed_fees_opt, optional_vec),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7177,6 +7302,25 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
 
+               if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
+                       let mut iter = skimmed_fees.into_iter();
+                       for htlc in pending_outbound_htlcs.iter_mut() {
+                               htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+                       }
+                       // We expect all skimmed fees to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+               if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
+                       let mut iter = skimmed_fees.into_iter();
+                       for htlc in holding_cell_htlc_updates.iter_mut() {
+                               if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
+                                       *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+                               }
+                       }
+                       // We expect all skimmed fees to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+
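Reading the two lists back is the mirror image: walk the HTLCs in order, pull one entry per HTLC, and treat a too-short or too-long list as a corrupt channel. A standalone sketch of that re-attachment (toy types, illustrative names, `()` standing in for `DecodeError::InvalidValue`):

struct Htlc { skimmed_fee_msat: Option<u64> }

fn apply_sparse(htlcs: &mut [Htlc], fees: Option<Vec<Option<u64>>>) -> Result<(), ()> {
    if let Some(fees) = fees {
        let mut iter = fees.into_iter();
        for htlc in htlcs.iter_mut() {
            // Running out of entries early means the lists disagree.
            htlc.skimmed_fee_msat = iter.next().ok_or(())?;
        }
        // Leftover entries are just as fatal as missing ones.
        if iter.next().is_some() { return Err(()); }
    }
    Ok(())
}

fn main() {
    let mut htlcs = vec![Htlc { skimmed_fee_msat: None }, Htlc { skimmed_fee_msat: None }];
    apply_sparse(&mut htlcs, Some(vec![None, Some(3)])).unwrap();
    assert_eq!(htlcs[1].skimmed_fee_msat, Some(3));
    assert!(apply_sparse(&mut htlcs, Some(vec![None])).is_err());
    // An absent list leaves the HTLCs untouched.
    apply_sparse(&mut htlcs, None).unwrap();
}
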
                Ok(Channel {
                        context: ChannelContext {
                                user_id,
@@ -7294,7 +7438,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
 
-                               pending_monitor_updates: pending_monitor_updates.unwrap(),
+                               blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        }
                })
        }
@@ -7311,7 +7455,6 @@ mod tests {
        use hex;
        use crate::ln::PaymentHash;
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
-       #[cfg(anchors)]
        use crate::ln::channel::InitFeatures;
        use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
@@ -7362,7 +7505,8 @@ mod tests {
                // arithmetic, causing a panic with debug assertions enabled.
                let fee_est = TestFeeEstimator { fee_est: 42 };
                let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
-               assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator,
+               assert!(Channel::<InMemorySigner>::check_remote_fee(
+                       &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
                        u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
        }
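Passing the channel type into `check_remote_fee` matters because the acceptable lower bound on a counterparty's feerate now depends on it: with `anchors_zero_fee_htlc_tx` the pre-signed commitment only has to make it into mempools (fees can be added at broadcast time), while without anchors it must be high enough to confirm. A toy version of that floor check (illustrative, not LDK's actual validation, which also handles upper bounds and current-feerate comparisons):

fn remote_feerate_acceptable(feerate_per_kw: u32, anchors: bool, mempool_min_per_kw: u32, confirm_min_per_kw: u32) -> bool {
    // Anchor channels only need the commitment to propagate; others must confirm unaided.
    let floor = if anchors { mempool_min_per_kw } else { confirm_min_per_kw };
    feerate_per_kw >= floor
}

fn main() {
    // 300 sat/kW is fine for an anchor channel but too low to confirm on its own.
    assert!(remote_feerate_acceptable(300, true, 253, 1_000));
    assert!(!remote_feerate_acceptable(300, false, 253, 1_000));
}
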
 
@@ -7401,7 +7545,7 @@ mod tests {
                }
        }
 
-       #[cfg(not(feature = "grind_signatures"))]
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
                PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
        }
@@ -7492,7 +7636,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7519,7 +7663,8 @@ mod tests {
                                session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
                                first_hop_htlc_msat: 548,
                                payment_id: PaymentId([42; 32]),
-                       }
+                       },
+                       skimmed_fee_msat: None,
                });
 
                // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
@@ -7618,7 +7763,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -7806,7 +7951,7 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
@@ -8076,6 +8221,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
                        out
@@ -8088,6 +8234,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
                        out
@@ -8498,6 +8645,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
                        out
@@ -8510,6 +8658,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
                        out
@@ -8630,7 +8779,6 @@ mod tests {
                assert!(res.is_ok());
        }
 
-       #[cfg(anchors)]
        #[test]
        fn test_supports_anchors_zero_htlc_tx_fee() {
                // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
@@ -8676,7 +8824,6 @@ mod tests {
                assert_eq!(channel_b.context.channel_type, expected_channel_type);
        }
 
-       #[cfg(anchors)]
        #[test]
        fn test_rejects_implicit_simple_anchors() {
                // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
@@ -8717,7 +8864,6 @@ mod tests {
                assert!(channel_b.is_err());
        }
 
-       #[cfg(anchors)]
        #[test]
        fn test_rejects_simple_anchors_channel_type() {
                // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,