Merge pull request #2393 from wpaulino/bump-transaction-event-handler-fixups
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 3a4b92fb47e15b3cbdc98226379f2f1bc6f32203..8232c5e1b1611e22b2a91f467f5a11e35a526215 100644
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -41,7 +41,7 @@ use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
 
 use crate::io;
@@ -224,6 +224,7 @@ struct OutboundHTLCOutput {
        payment_hash: PaymentHash,
        state: OutboundHTLCState,
        source: HTLCSource,
+       skimmed_fee_msat: Option<u64>,
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
@@ -235,6 +236,8 @@ enum HTLCUpdateAwaitingACK {
                payment_hash: PaymentHash,
                source: HTLCSource,
                onion_routing_packet: msgs::OnionPacket,
+               // The extra fee we're skimming off the top of this HTLC.
+               skimmed_fee_msat: Option<u64>,
        },
        ClaimHTLC {
                payment_preimage: PaymentPreimage,
@@ -304,6 +307,95 @@ const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisc
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
+pub const DEFAULT_MAX_HTLCS: u16 = 50;
+
+pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
+       const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
+       const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
+       if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
+}
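// A minimal sketch of the commitment-fee arithmetic this weight feeds into (the helper
// name is hypothetical; each non-dust HTLC adds COMMITMENT_TX_WEIGHT_PER_HTLC = 172):
fn commit_tx_fee_sat_sketch(feerate_per_kw: u64, nondust_htlcs: u64, anchors: bool) -> u64 {
        let base_weight: u64 = if anchors { 1124 } else { 724 };
        feerate_per_kw * (base_weight + nondust_htlcs * 172) / 1000
}
// e.g. a non-anchor channel at 2_500 sat/kW with 3 non-dust HTLCs pays
// commit_tx_fee_sat_sketch(2_500, 3, false) == 2_500 * (724 + 516) / 1_000 == 3_100 sats.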
+
+#[cfg(not(test))]
+const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+#[cfg(test)]
+pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
+
+pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
+
+/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
+/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
+/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
+/// `holder_max_htlc_value_in_flight_msat`.
+pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
+
+/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
+/// `option_support_large_channel` (aka wumbo channels) is not supported.
+/// It's 2^24 - 1.
+pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+
+/// Total bitcoin supply in satoshis.
+pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
+
+/// The maximum network dust limit for standard script formats. This currently represents the
+/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
+/// transaction non-standard and thus refuses to relay it.
+/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
+/// implementations use this value for their dust limit today.
+pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+
+/// The maximum channel dust limit we will accept from our counterparty.
+pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+
+/// The dust limit is used for both the commitment transaction outputs and the closing
+/// transactions. For cooperative closing transactions, we require segwit outputs, though we
+/// accept *any* segwit script, which may be up to 42 bytes in length.
+/// In order to avoid having to concern ourselves with standardness during the closing process, we
+/// simply require our counterparty to use a dust limit which will leave any segwit output
+/// standard.
+/// See <https://github.com/lightning/bolts/issues/905> for more details.
+pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
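// Worked derivation of the 354 sat figure above, following Bitcoin Core's dust rule at the
// default 3 sat/vB dust relay feerate: the largest standard segwit output is
// 8 (value) + 1 (script length) + 42 (script) = 51 bytes, its discounted spend cost is
// 32 + 4 + 1 + (107 / 4) + 4 = 67 vbytes (integer division), and (51 + 67) * 3 = 354 sats.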
+
+// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
+pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+
+/// Used to return a simple Error back to ChannelManager. Will get converted to a
+/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
+/// channel_id in ChannelManager.
+pub(super) enum ChannelError {
+       Ignore(String),
+       Warn(String),
+       Close(String),
+}
+
+impl fmt::Debug for ChannelError {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               match self {
+                       &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
+                       &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
+                       &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+               }
+       }
+}
+
+impl fmt::Display for ChannelError {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               match self {
+                       &ChannelError::Ignore(ref e) => write!(f, "{}", e),
+                       &ChannelError::Warn(ref e) => write!(f, "{}", e),
+                       &ChannelError::Close(ref e) => write!(f, "{}", e),
+               }
+       }
+}
+
+macro_rules! secp_check {
+       ($res: expr, $err: expr) => {
+               match $res {
+                       Ok(thing) => thing,
+                       Err(_) => return Err(ChannelError::Close($err)),
+               }
+       };
+}
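// Usage sketch (hypothetical call site): secp_check! maps a secp256k1 failure into a
// ChannelError::Close rather than panicking or propagating the raw error, e.g.:
//   secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &their_sig, &their_funding_key),
//           "Invalid funding_created signature from peer".to_owned());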
+
 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
 /// our counterparty or not. However, we don't want to announce updates right away to avoid
 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
@@ -396,13 +488,13 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
        /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
        /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
        /// previously placed in the holding cell (and has since been removed).
        NewClaim {
                /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-               monitor_update: &'a ChannelMonitorUpdate,
+               monitor_update: ChannelMonitorUpdate,
                /// The value of the HTLC which was claimed, in msat.
                htlc_value_msat: u64,
        },
@@ -435,6 +527,10 @@ pub(super) struct ReestablishResponses {
 }
 
 /// The return type of `force_shutdown`
+///
+/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// followed by the list of HTLCs to fail back, each given as a (source, payment hash,
+/// counterparty_node_id, channel_id) tuple.
 pub(crate) type ShutdownResult = (
        Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
        Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
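// Illustrative destructuring of a ShutdownResult (variable names are hypothetical):
//   let (monitor_update_opt, dropped_htlcs) = channel.force_shutdown(true);
//   if let Some((counterparty_node_id, funding_txo, update)) = monitor_update_opt {
//           // hand the ChannelMonitorUpdate to the chain monitor
//   }
//   for (source, payment_hash, counterparty_node_id, channel_id) in dropped_htlcs {
//           // fail each HTLC backwards toward its origin
//   }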
@@ -496,17 +592,10 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 
 struct PendingChannelMonitorUpdate {
        update: ChannelMonitorUpdate,
-       /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
-       /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
-       /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
-       ///
-       /// [`ChannelManager`]: super::channelmanager::ChannelManager
-       blocked: bool,
 }
 
 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
        (0, update, required),
-       (2, blocked, required),
 });
 
 /// Contains everything about the channel including state, and various flags.
@@ -777,18 +866,12 @@ pub(super) struct ChannelContext<Signer: ChannelSigner> {
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
 
-       /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
-       /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
-       /// completes we still need to be able to complete the persistence. Thus, we have to keep a
-       /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
-       pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+       /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+       /// store it here and only release it to the `ChannelManager` once it asks for it.
+       blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
 }
 
 impl<Signer: ChannelSigner> ChannelContext<Signer> {
-       pub(crate) fn opt_anchors(&self) -> bool {
-               self.channel_transaction_parameters.opt_anchors.is_some()
-       }
-
        /// Allowed in any state (including after shutdown)
        pub fn get_update_time_counter(&self) -> u32 {
                self.update_time_counter
@@ -824,6 +907,34 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
        }
 
+       /// Returns the [`ChannelShutdownState`] describing how far this channel has progressed
+       /// through the stages of shutdown.
+       pub fn shutdown_state(&self) -> ChannelShutdownState {
+               if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+                       return ChannelShutdownState::ShutdownComplete;
+               }
+               if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+                       return ChannelShutdownState::ShutdownInitiated;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+                       return ChannelShutdownState::ResolvingHTLCs;
+               }
+               if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+                       return ChannelShutdownState::NegotiatingClosingFee;
+               }
+               return ChannelShutdownState::NotShuttingDown;
+       }
+
+       fn closing_negotiation_ready(&self) -> bool {
+               self.pending_inbound_htlcs.is_empty() &&
+               self.pending_outbound_htlcs.is_empty() &&
+               self.pending_update_fee.is_none() &&
+               self.channel_state &
+               (BOTH_SIDES_SHUTDOWN_MASK |
+                       ChannelState::AwaitingRemoteRevoke as u32 |
+                       ChannelState::PeerDisconnected as u32 |
+                       ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+       }
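// In words: closing-fee negotiation can begin only once no HTLCs or fee update remain
// pending and, aside from both shutdown bits being set, none of AwaitingRemoteRevoke,
// PeerDisconnected, or MonitorUpdateInProgress are set in channel_state.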
+
        /// Returns true if this channel is currently available for use. This is a superset of
        /// is_usable() and considers things like the channel being temporarily disabled.
        /// Allowed in any state (including after shutdown)
@@ -980,8 +1091,18 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
        }
 
-       pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
-               self.config.options.max_dust_htlc_exposure_msat
+       pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+               fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+       where F::Target: FeeEstimator
+       {
+               match self.config.options.max_dust_htlc_exposure {
+                       MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
+                               let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+                                       ConfirmationTarget::HighPriority);
+                               feerate_per_kw as u64 * multiplier
+                       },
+                       MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+               }
        }
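// Worked example (values are illustrative): with a HighPriority feerate estimate of
// 5_000 sat/kW and MaxDustHTLCExposure::FeeRateMultiplier(5_000), dust exposure is
// capped at 5_000 * 5_000 = 25_000_000 msat (25_000 sats), scaling with the feerate.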
 
        /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
@@ -1115,10 +1236,10 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                        ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
                                if $outbound == local { // "offered HTLC output"
                                        let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
-                                       let htlc_tx_fee = if self.opt_anchors() {
+                                       let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                0
                                        } else {
-                                               feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000
+                                               feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
                                        };
                                        if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
                                                log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
@@ -1129,10 +1250,10 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                                        }
                                } else {
                                        let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
-                                       let htlc_tx_fee = if self.opt_anchors() {
+                                       let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                                                0
                                        } else {
-                                               feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000
+                                               feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
                                        };
                                        if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
                                                log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
@@ -1237,8 +1358,8 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                        broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
                }
 
-               let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.channel_transaction_parameters.opt_anchors.is_some());
-               let anchors_val = if self.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
+               let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
+               let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
                let (value_to_self, value_to_remote) = if self.is_outbound() {
                        (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
                } else {
@@ -1273,7 +1394,6 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
                let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
                                                                             value_to_a as u64,
                                                                             value_to_b as u64,
-                                                                            self.channel_transaction_parameters.opt_anchors.is_some(),
                                                                             funding_pubkey_a,
                                                                             funding_pubkey_b,
                                                                             keys.clone(),
@@ -1333,7 +1453,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 
        /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
        /// pays to get_funding_redeemscript().to_v0_p2wsh()).
-       /// Panics if called before accept_channel/new_from_req
+       /// Panics if called before accept_channel/InboundV1Channel::new
        pub fn get_funding_redeemscript(&self) -> Script {
                make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
@@ -1368,412 +1488,563 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
        pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
                self.counterparty_forwarding_info.clone()
        }
-}
 
-// Internal utility functions for channels
+       /// Returns an `HTLCStats` describing the pending inbound HTLCs.
+       fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+               let context = self;
+               let mut stats = HTLCStats {
+                       pending_htlcs: context.pending_inbound_htlcs.len() as u32,
+                       pending_htlcs_value_msat: 0,
+                       on_counterparty_tx_dust_exposure_msat: 0,
+                       on_holder_tx_dust_exposure_msat: 0,
+                       holding_cell_msat: 0,
+                       on_holder_tx_holding_cell_htlcs_count: 0,
+               };
 
-/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
-/// `channel_value_satoshis` in msat, set through
-/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
-///
-/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
-///
-/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
-fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
-       let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
-               1
-       } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
-               100
-       } else {
-               config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
-       };
-       channel_value_satoshis * 10 * configured_percent
-}
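// Worked example of the formula above: a 1_000_000 sat channel with
// max_inbound_htlc_value_in_flight_percent_of_channel = 25 yields
// 1_000_000 * 10 * 25 = 250_000_000 msat, i.e. 25% of the channel value,
// since sats * 1_000 * percent / 100 == sats * 10 * percent.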
+               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (0, 0)
+               } else {
+                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
+                       (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+                               dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+               };
+               let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+               let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       stats.pending_htlcs_value_msat += htlc.amount_msat;
+                       if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+                       }
+                       if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+                       }
+               }
+               stats
+       }
 
-/// Returns a minimum channel reserve value the remote needs to maintain,
-/// required by us according to the configured or default
-/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
-///
-/// Guaranteed to return a value no larger than channel_value_satoshis
-///
-/// This is used both for outbound and inbound channels and has lower bound
-/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
-pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
-       let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
-       cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
-}
+       /// Returns an `HTLCStats` describing pending outbound HTLCs, *including* pending adds in our holding cell.
+       fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+               let context = self;
+               let mut stats = HTLCStats {
+                       pending_htlcs: context.pending_outbound_htlcs.len() as u32,
+                       pending_htlcs_value_msat: 0,
+                       on_counterparty_tx_dust_exposure_msat: 0,
+                       on_holder_tx_dust_exposure_msat: 0,
+                       holding_cell_msat: 0,
+                       on_holder_tx_holding_cell_htlcs_count: 0,
+               };
 
-/// This is for legacy reasons, present for forward-compatibility.
-/// LDK versions older than 0.0.104 don't know how to read/handle values other than default
-/// from storage. Hence, we use this function to not persist default values of
-/// `holder_selected_channel_reserve_satoshis` for channels into storage.
-pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
-       let (q, _) = channel_value_satoshis.overflowing_div(100);
-       cmp::min(channel_value_satoshis, cmp::max(q, 1000))
-}
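// e.g. a 1_000_000 sat channel gets max(1_000_000 / 100, 1000) = 10_000 sats of reserve,
// while a 50_000 sat channel hits the floor: max(500, 1000) = 1_000 sats.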
+               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (0, 0)
+               } else {
+                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
+                       (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+                               dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+               };
+               let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+               let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+               for ref htlc in context.pending_outbound_htlcs.iter() {
+                       stats.pending_htlcs_value_msat += htlc.amount_msat;
+                       if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
+                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+                       }
+                       if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
+                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+                       }
+               }
 
-// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
-// Note that num_htlcs should not include dust HTLCs.
-#[inline]
-fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
-       feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
-}
+               for update in context.holding_cell_htlc_updates.iter() {
+                       if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
+                               stats.pending_htlcs += 1;
+                               stats.pending_htlcs_value_msat += amount_msat;
+                               stats.holding_cell_msat += amount_msat;
+                               if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
+                                       stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
+                               }
+                               if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
+                                       stats.on_holder_tx_dust_exposure_msat += amount_msat;
+                               } else {
+                                       stats.on_holder_tx_holding_cell_htlcs_count += 1;
+                               }
+                       }
+               }
+               stats
+       }
 
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
-// inbound channel.
-//
-// Holder designates channel data owned for the benefit of the user client.
-// Counterparty designates channel data owned by the other channel participant entity.
-pub(super) struct Channel<Signer: ChannelSigner> {
-       pub context: ChannelContext<Signer>,
-}
+       /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
+       /// Doesn't bother handling the
+       /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
+       /// corner case properly.
+       pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+       -> AvailableBalances
+       where F::Target: FeeEstimator
+       {
+               let context = &self;
+               // Note that we have to handle overflow due to the above case.
+               let inbound_stats = context.get_inbound_pending_htlc_stats(None);
+               let outbound_stats = context.get_outbound_pending_htlc_stats(None);
 
-#[cfg(any(test, fuzzing))]
-struct CommitmentTxInfoCached {
-       fee: u64,
-       total_pending_htlcs: usize,
-       next_holder_htlc_id: u64,
-       next_counterparty_htlc_id: u64,
-       feerate: u32,
-}
+               let mut balance_msat = context.value_to_self_msat;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
+                               balance_msat += htlc.amount_msat;
+                       }
+               }
+               balance_msat -= outbound_stats.pending_htlcs_value_msat;
 
-pub const DEFAULT_MAX_HTLCS: u16 = 50;
+               let outbound_capacity_msat = context.value_to_self_msat
+                               .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+                               .saturating_sub(
+                                       context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
 
-pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
-       const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
-       const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
-       if opt_anchors { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
-}
+               let mut available_capacity_msat = outbound_capacity_msat;
 
-#[cfg(not(test))]
-const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
-#[cfg(test)]
-pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
-
-pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
-
-/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
-/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
-/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
-/// `holder_max_htlc_value_in_flight_msat`.
-pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
-
-/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
-/// `option_support_large_channel` (aka wumbo channels) is not supported.
-/// It's 2^24 - 1.
-pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
+               if context.is_outbound() {
+                       // We should mind channel commit tx fee when computing how much of the available capacity
+                       // can be used in the next htlc. Mirrors the logic in send_htlc.
+                       //
+                       // The fee depends on whether the amount we will be sending is above dust or not,
+                       // and the answer will in turn change the amount itself, making it a circular
+                       // dependency.
+                       // This complicates the computation around dust values, up to the one-HTLC value.
+                       let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
+                       if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
+                       }
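// Illustrative numbers: on a non-anchor channel at feerate_per_kw = 1_000 with a
// 354 sat holder dust limit, BOLT 3's 663-weight HTLC-timeout transaction gives
// real_dust_limit_timeout_sat = 354 + 1_000 * 663 / 1_000 = 1_017 sats.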
 
-/// Total bitcoin supply in satoshis.
-pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
+                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
+                       let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+                       let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
+                       let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
 
-/// The maximum network dust limit for standard script formats. This currently represents the
-/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
-/// transaction non-standard and thus refuses to relay it.
-/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
-/// implementations use this value for their dust limit today.
-pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
+                       // We will first subtract the fee as if we were above-dust. Then, if the resulting
+                       // value ends up being below dust, we have this fee available again. In that case,
+                       // match the value to right-below-dust.
+                       let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
+                       if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
+                               let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
+                               debug_assert!(one_htlc_difference_msat != 0);
+                               capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
+                               capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
+                               available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
+                       } else {
+                               available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+                       }
+               } else {
+                       // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
+                       // sending a new HTLC won't reduce their balance below our reserve threshold.
+                       let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
+                       if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
+                       }
 
-/// The maximum channel dust limit we will accept from our counterparty.
-pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
+                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
+                       let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
 
-/// The dust limit is used for both the commitment transaction outputs as well as the closing
-/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
-/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
-/// In order to avoid having to concern ourselves with standardness during the closing process, we
-/// simply require our counterparty to use a dust limit which will leave any segwit output
-/// standard.
-/// See <https://github.com/lightning/bolts/issues/905> for more details.
-pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
+                       let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
+                       let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
+                               .saturating_sub(inbound_stats.pending_htlcs_value_msat);
 
-// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
-pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+                       if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
+                               // If another HTLC's fee would reduce the remote's balance below the reserve limit
+                               // we've selected for them, we can only send dust HTLCs.
+                               available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+                       }
+               }
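// Illustrative numbers: if the counterparty's balance net of pending HTLCs is
// 10_500 sats, our selected reserve is 10_000 sats, and one more non-dust HTLC would
// cost them 600 sats of commitment fees, then 10_500 < 600 + 10_000 holds and we
// restrict ourselves to HTLCs below real_dust_limit_success_sat.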
 
-/// Used to return a simple Error back to ChannelManager. Will get converted to a
-/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
-/// channel_id in ChannelManager.
-pub(super) enum ChannelError {
-       Ignore(String),
-       Warn(String),
-       Close(String),
-}
+               let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
 
-impl fmt::Debug for ChannelError {
-       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-               match self {
-                       &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
-                       &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
-                       &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
-               }
-       }
-}
+               // If we get close to our maximum dust exposure, we end up in a situation where we can send
+               // between zero and the remaining dust exposure limit OR above the dust limit.
+               // Because we cannot express this as a simple min/max, we prefer to tell the user they can
+               // send above the dust limit (as the router can always overpay to meet the dust limit).
+               let mut remaining_msat_below_dust_exposure_limit = None;
+               let mut dust_exposure_dust_limit_msat = 0;
+               let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
 
-macro_rules! secp_check {
-       ($res: expr, $err: expr) => {
-               match $res {
-                       Ok(thing) => thing,
-                       Err(_) => return Err(ChannelError::Close($err)),
+               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
+               } else {
+                       let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
+                       (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+                        context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+               };
+               let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+                       remaining_msat_below_dust_exposure_limit =
+                               Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
-       };
-}
 
-impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
-       fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
-               // The default channel type (ie the first one we try) depends on whether the channel is
-               // public - if it is, we just go with `only_static_remotekey` as it's the only option
-               // available. If it's private, we first try `scid_privacy` as it provides better privacy
-               // with no other changes, and fall back to `only_static_remotekey`.
-               let mut ret = ChannelTypeFeatures::only_static_remote_key();
-               if !config.channel_handshake_config.announced_channel &&
-                       config.channel_handshake_config.negotiate_scid_privacy &&
-                       their_features.supports_scid_privacy() {
-                       ret.set_scid_privacy_required();
+               let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+                       remaining_msat_below_dust_exposure_limit = Some(cmp::min(
+                               remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
+                               max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
                }
 
-               // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
-               // set it now. If they don't understand it, we'll fall back to our default of
-               // `only_static_remotekey`.
-               #[cfg(anchors)]
-               { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
-                       if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
-                               their_features.supports_anchors_zero_fee_htlc_tx() {
-                               ret.set_anchors_zero_fee_htlc_tx_required();
+               if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
+                       if available_capacity_msat < dust_exposure_dust_limit_msat {
+                               available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
+                       } else {
+                               next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
                        }
                }
 
-               ret
-       }
+               available_capacity_msat = cmp::min(available_capacity_msat,
+                       context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
 
-       /// If we receive an error message, it may only be a rejection of the channel type we tried,
-       /// not of our ability to open any channel at all. Thus, on error, we should first call this
-       /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
-       pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
-               if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
-               if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
-                       // We've exhausted our options
-                       return Err(());
+               if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
+                       available_capacity_msat = 0;
                }
-               // We support opening a few different types of channels. Try removing our additional
-               // features one by one until we've either arrived at our default or the counterparty has
-               // accepted one.
-               //
-               // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
-               // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
-               // checks whether the counterparty supports every feature, this would only happen if the
-               // counterparty is advertising the feature, but rejecting channels proposing the feature for
-               // whatever reason.
-               if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
-                       assert!(self.context.channel_transaction_parameters.opt_non_zero_fee_anchors.is_none());
-                       self.context.channel_transaction_parameters.opt_anchors = None;
-               } else if self.context.channel_type.supports_scid_privacy() {
-                       self.context.channel_type.clear_scid_privacy();
-               } else {
-                       self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
+
+               AvailableBalances {
+                       inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
+                                       - context.value_to_self_msat as i64
+                                       - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+                                       - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
+                               0) as u64,
+                       outbound_capacity_msat,
+                       next_outbound_htlc_limit_msat: available_capacity_msat,
+                       next_outbound_htlc_minimum_msat,
+                       balance_msat,
                }
-               Ok(self.get_open_channel(chain_hash))
        }
 
-       // Constructors:
-       pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
-               fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
-               channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
-               outbound_scid_alias: u64
-       ) -> Result<Channel<Signer>, APIError>
-       where ES::Target: EntropySource,
-             SP::Target: SignerProvider<Signer = Signer>,
-             F::Target: FeeEstimator,
-       {
-               let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
-               let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
-               let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
-               let pubkeys = holder_signer.pubkeys().clone();
-
-               if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
-                       return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
-               }
-               if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
-                       return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
-               }
-               let channel_value_msat = channel_value_satoshis * 1000;
-               if push_msat > channel_value_msat {
-                       return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
-               }
-               if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
-                       return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
-               }
-               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
-               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       // Protocol level safety check in place, although it should never happen because
-                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-                       return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
-               }
+       pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
+               let context = &self;
+               (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
+       }
 
-               let channel_type = Self::get_initial_channel_type(&config, their_features);
-               debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+       /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
+       /// number of pending HTLCs that are on track to be in our next commitment tx.
+       ///
+       /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+       /// `fee_spike_buffer_htlc` is `Some`.
+       ///
+       /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
+       /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+       ///
+       /// Dust HTLCs are excluded.
+       fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+               let context = &self;
+               assert!(context.is_outbound());
 
-               let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (0, 0)
+               } else {
+                       (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+                               context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+               };
+               let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+               let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
 
-               let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
-               let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
-               if value_to_self_msat < commitment_tx_fee {
-                       return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+               let mut addl_htlcs = 0;
+               if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+               match htlc.origin {
+                       HTLCInitiator::LocalOffered => {
+                               if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+                                       addl_htlcs += 1;
+                               }
+                       },
+                       HTLCInitiator::RemoteOffered => {
+                               if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+                                       addl_htlcs += 1;
+                               }
+                       }
                }
 
-               let mut secp_ctx = Secp256k1::new();
-               secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
-
-               let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-                       match signer_provider.get_shutdown_scriptpubkey() {
-                               Ok(scriptpubkey) => Some(scriptpubkey),
-                               Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+               let mut included_htlcs = 0;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
+                               continue
                        }
-               } else { None };
+                       // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
+                       // transaction including this HTLC if it times out before they RAA.
+                       included_htlcs += 1;
+               }
 
-               if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
-                       if !shutdown_scriptpubkey.is_compatible(&their_features) {
-                               return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+               for ref htlc in context.pending_outbound_htlcs.iter() {
+                       if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
+                               continue
+                       }
+                       match htlc.state {
+                               OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
+                               OutboundHTLCState::Committed => included_htlcs += 1,
+                               OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+                               // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
+                               // transaction won't be generated until they send us their next RAA, which will mean
+                               // dropping any HTLCs in this state.
+                               _ => {},
                        }
                }
 
-               let destination_script = match signer_provider.get_destination_script() {
-                       Ok(script) => script,
-                       Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
-               };
-
-               let temporary_channel_id = entropy_source.get_secure_random_bytes();
-
-               Ok(Channel {
-                       context: ChannelContext {
-                               user_id,
-
-                               config: LegacyChannelConfig {
-                                       options: config.channel_config.clone(),
-                                       announced_channel: config.channel_handshake_config.announced_channel,
-                                       commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+               for htlc in context.holding_cell_htlc_updates.iter() {
+                       match htlc {
+                               &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
+                                       if amount_msat / 1000 < real_dust_limit_timeout_sat {
+                                               continue
+                                       }
+                                       included_htlcs += 1
                                },
+                               _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
+                                        // ack we're guaranteed to never include them in commitment txs anymore.
+                       }
+               }
 
-                               prev_config: None,
+               let num_htlcs = included_htlcs + addl_htlcs;
+               let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
+               #[cfg(any(test, fuzzing))]
+               {
+                       let mut fee = res;
+                       if fee_spike_buffer_htlc.is_some() {
+                               fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
+                       }
+                       let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
+                               + context.holding_cell_htlc_updates.len();
+                       let commitment_tx_info = CommitmentTxInfoCached {
+                               fee,
+                               total_pending_htlcs,
+                               next_holder_htlc_id: match htlc.origin {
+                                       HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
+                                       HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
+                               },
+                               next_counterparty_htlc_id: match htlc.origin {
+                                       HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
+                                       HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
+                               },
+                               feerate: context.feerate_per_kw,
+                       };
+                       *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
+               }
+               res
+       }
 
-                               inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
+       /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
+       /// pending HTLCs that are on track to be in their next commitment tx
+       ///
+       /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+       /// `fee_spike_buffer_htlc` is `Some`.
+       ///
+       /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
+       /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+       ///
+       /// Dust HTLCs are excluded.
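+       ///
+       /// For example, calling `self.next_remote_commit_tx_fee_msat(HTLCCandidate::new(amount_msat,
+       /// HTLCInitiator::RemoteOffered), Some(()))` computes the fee on their next commitment if that
+       /// HTLC were added, while reserving room for one further non-dust HTLC as a fee-spike buffer.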
+       fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+               let context = &self;
+               assert!(!context.is_outbound());
 
-                               channel_id: temporary_channel_id,
-                               temporary_channel_id: Some(temporary_channel_id),
-                               channel_state: ChannelState::OurInitSent as u32,
-                               announcement_sigs_state: AnnouncementSigsState::NotSent,
-                               secp_ctx,
-                               channel_value_satoshis,
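+               // With anchors_zero_fee_htlc_tx, second-stage HTLC transactions pay no fee themselves
+               // (they are fee-bumped at broadcast time), so no feerate-based buffer is added on top
+               // of the counterparty's dust limit below.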
+               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (0, 0)
+               } else {
+                       (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+                               context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+               };
+               let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+               let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
 
-                               latest_monitor_update_id: 0,
+               let mut addl_htlcs = 0;
+               if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+               match htlc.origin {
+                       HTLCInitiator::LocalOffered => {
+                               if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+                                       addl_htlcs += 1;
+                               }
+                       },
+                       HTLCInitiator::RemoteOffered => {
+                               if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+                                       addl_htlcs += 1;
+                               }
+                       }
+               }
 
-                               holder_signer,
-                               shutdown_scriptpubkey,
-                               destination_script,
+               // When calculating the set of HTLCs which will be included in their next commitment_signed, all
+               // non-dust inbound HTLCs are included (as all states imply they will be included), while
+               // outbound HTLCs are only included in certain states, see below.
+               let mut included_htlcs = 0;
+               for ref htlc in context.pending_inbound_htlcs.iter() {
+                       if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
+                               continue
+                       }
+                       included_htlcs += 1;
+               }
 
-                               cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
-                               cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
-                               value_to_self_msat,
+               for ref htlc in context.pending_outbound_htlcs.iter() {
+                       if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
+                               continue
+                       }
+                       // We only include outbound HTLCs if it will be included in their next commitment_signed,
+                       // i.e. if they've responded to us with an RAA after announcement.
+                       match htlc.state {
+                               OutboundHTLCState::Committed => included_htlcs += 1,
+                               OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+                               OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
+                               _ => {},
+                       }
+               }
 
-                               pending_inbound_htlcs: Vec::new(),
-                               pending_outbound_htlcs: Vec::new(),
-                               holding_cell_htlc_updates: Vec::new(),
-                               pending_update_fee: None,
-                               holding_cell_update_fee: None,
-                               next_holder_htlc_id: 0,
-                               next_counterparty_htlc_id: 0,
-                               update_time_counter: 1,
+               let num_htlcs = included_htlcs + addl_htlcs;
+               let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
+               #[cfg(any(test, fuzzing))]
+               {
+                       let mut fee = res;
+                       if fee_spike_buffer_htlc.is_some() {
+                               fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
+                       }
+                       let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
+                       let commitment_tx_info = CommitmentTxInfoCached {
+                               fee,
+                               total_pending_htlcs,
+                               next_holder_htlc_id: match htlc.origin {
+                                       HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
+                                       HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
+                               },
+                               next_counterparty_htlc_id: match htlc.origin {
+                                       HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
+                                       HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
+                               },
+                               feerate: context.feerate_per_kw,
+                       };
+                       *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
+               }
+               res
+       }
 
-                               resend_order: RAACommitmentOrder::CommitmentFirst,
+       /// Returns the transaction if there is a pending funding transaction that has yet to be broadcast.
+       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+               if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
+                       self.funding_transaction.clone()
+               } else {
+                       None
+               }
+       }
 
-                               monitor_pending_channel_ready: false,
-                               monitor_pending_revoke_and_ack: false,
-                               monitor_pending_commitment_signed: false,
-                               monitor_pending_forwards: Vec::new(),
-                               monitor_pending_failures: Vec::new(),
-                               monitor_pending_finalized_fulfills: Vec::new(),
+       /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
+       /// shutdown of this channel - no more calls into this Channel may be made afterwards except
+       /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
+       /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
+       /// immediately (others we will have to allow to time out).
+       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+               // Note that we MUST only generate a monitor update that indicates force-closure - we're
+               // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+               // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+               // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
+               assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
 
-                               #[cfg(debug_assertions)]
-                               holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
-                               #[cfg(debug_assertions)]
-                               counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+               // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
+               // return them to fail the payment.
+               let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+               let counterparty_node_id = self.get_counterparty_node_id();
+               for htlc_update in self.holding_cell_htlc_updates.drain(..) {
+                       match htlc_update {
+                               HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
+                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
+                               },
+                               _ => {}
+                       }
+               }
+               let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+                       // If we haven't yet exchanged funding signatures (i.e. channel_state < FundingSent),
+                       // returning a channel monitor update here would imply a channel monitor update before
+                       // we even registered the channel monitor to begin with, which is invalid.
+                       // Thus, if we aren't actually at a point where we could conceivably broadcast the
+                       // funding transaction, don't return a funding txo (which prevents providing the
+                       // monitor update to the user, even if we return one).
+                       // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
+                       if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+                               self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+                               Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+                                       update_id: self.latest_monitor_update_id,
+                                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+                               }))
+                       } else { None }
+               } else { None };
 
-                               last_sent_closing_fee: None,
-                               pending_counterparty_closing_signed: None,
-                               closing_fee_limits: None,
-                               target_closing_feerate_sats_per_kw: None,
+               self.channel_state = ChannelState::ShutdownComplete as u32;
+               self.update_time_counter += 1;
+               (monitor_update, dropped_outbound_htlcs)
+       }
+}
 
-                               inbound_awaiting_accept: false,
+// Internal utility functions for channels
 
-                               funding_tx_confirmed_in: None,
-                               funding_tx_confirmation_height: 0,
-                               short_channel_id: None,
-                               channel_creation_height: current_chain_height,
+/// Returns the value to use for `holder_max_htlc_value_in_flight_msat`, computed as a
+/// percentage of `channel_value_satoshis` (expressed in msat) and set through
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+///
+/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
+///
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
+fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
+       let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
+               1
+       } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
+               100
+       } else {
+               config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
+       };
+       channel_value_satoshis * 10 * configured_percent
+}
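+
+// Worked example (an illustrative test of the clamping arithmetic above): with 10% configured,
+// a 1_000_000 sat channel allows 100_000_000 msat in flight, since
+// `channel_value_satoshis * 10 * percent == channel_value_satoshis * 1000 * percent / 100`.
+#[test]
+fn example_holder_max_htlc_value_in_flight_msat() {
+       let mut config = ChannelHandshakeConfig::default();
+       config.max_inbound_htlc_value_in_flight_percent_of_channel = 10;
+       assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 100_000_000);
+       // Configured percentages are clamped to the [1, 100] range before use:
+       config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
+       assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 10_000_000);
+}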
 
-                               feerate_per_kw: feerate,
-                               counterparty_dust_limit_satoshis: 0,
-                               holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
-                               counterparty_max_htlc_value_in_flight_msat: 0,
-                               holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
-                               counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
-                               holder_selected_channel_reserve_satoshis,
-                               counterparty_htlc_minimum_msat: 0,
-                               holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
-                               counterparty_max_accepted_htlcs: 0,
-                               holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
-                               minimum_depth: None, // Filled in in accept_channel
-
-                               counterparty_forwarding_info: None,
-
-                               channel_transaction_parameters: ChannelTransactionParameters {
-                                       holder_pubkeys: pubkeys,
-                                       holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
-                                       is_outbound_from_holder: true,
-                                       counterparty_parameters: None,
-                                       funding_outpoint: None,
-                                       opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
-                                       opt_non_zero_fee_anchors: None
-                               },
-                               funding_transaction: None,
-
-                               counterparty_cur_commitment_point: None,
-                               counterparty_prev_commitment_point: None,
-                               counterparty_node_id,
-
-                               counterparty_shutdown_scriptpubkey: None,
-
-                               commitment_secrets: CounterpartyCommitmentSecrets::new(),
-
-                               channel_update_status: ChannelUpdateStatus::Enabled,
-                               closing_signed_in_flight: false,
-
-                               announcement_sigs: None,
-
-                               #[cfg(any(test, fuzzing))]
-                               next_local_commitment_tx_fee_info_cached: Mutex::new(None),
-                               #[cfg(any(test, fuzzing))]
-                               next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
-
-                               workaround_lnd_bug_4006: None,
-                               sent_message_awaiting_response: None,
+/// Returns a minimum channel reserve value the remote needs to maintain,
+/// required by us according to the configured or default
+/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
+///
+/// Guaranteed to return a value no larger than `channel_value_satoshis`.
+///
+/// This is used both for outbound and inbound channels and has lower bound
+/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+       let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+       cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+}
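+
+// Worked example (an illustrative test; assumes `MIN_THEIR_CHAN_RESERVE_SATOSHIS` is the
+// 1000 sat floor used elsewhere in this file): at 1% (10_000 millionths), a 1_000_000 sat
+// channel requires a 10_000 sat reserve, while a small channel is floored at the minimum.
+#[test]
+fn example_holder_selected_channel_reserve_satoshis() {
+       let mut config = UserConfig::default();
+       config.channel_handshake_config.their_channel_reserve_proportional_millionths = 10_000;
+       assert_eq!(get_holder_selected_channel_reserve_satoshis(1_000_000, &config), 10_000);
+       // 1% of 50_000 is 500 sats, below the floor, so the minimum reserve applies:
+       assert_eq!(get_holder_selected_channel_reserve_satoshis(50_000, &config), MIN_THEIR_CHAN_RESERVE_SATOSHIS);
+}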
 
-                               latest_inbound_scid_alias: None,
-                               outbound_scid_alias,
+/// This is for legacy reasons, present for forward-compatibility.
+/// LDK versions older than 0.0.104 don't know how to read/handle values other than the
+/// default from storage. Hence, we use this function to avoid persisting default values of
+/// `holder_selected_channel_reserve_satoshis` for channels into storage.
+pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+       let (q, _) = channel_value_satoshis.overflowing_div(100);
+       cmp::min(channel_value_satoshis, cmp::max(q, 1000))
+}
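+
+// Worked example (an illustrative test): the legacy default is 1% of the channel value with a
+// 1000 sat floor, capped at the channel value itself.
+#[test]
+fn example_legacy_default_channel_reserve() {
+       assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(1_000_000), 10_000);
+       // 1% of 50_000 is 500, so the 1000 sat floor applies:
+       assert_eq!(get_legacy_default_holder_selected_channel_reserve_satoshis(50_000), 1_000);
+}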
 
-                               channel_pending_event_emitted: false,
-                               channel_ready_event_emitted: false,
+// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+#[inline]
+fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+       feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+}
 
-                               #[cfg(any(test, fuzzing))]
-                               historical_inbound_htlc_fulfills: HashSet::new(),
+// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+       // Note that we need to divide before multiplying to round properly,
+       // since the lowest denomination of bitcoin on-chain is the satoshi.
+       (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
+}
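+
+// Worked example (an illustrative test): fees are truncated to whole satoshis, which is why the
+// msat variant divides by 1000 before re-multiplying. At 253 sat/kW with no HTLCs on a
+// non-anchor channel (724 weight), 724 * 253 / 1000 truncates to 183 sats, so the msat fee is
+// 183_000 rather than 183_172.
+#[test]
+fn example_commit_tx_fee_rounding() {
+       let features = ChannelTypeFeatures::only_static_remote_key();
+       assert_eq!(commit_tx_fee_sat(253, 0, &features), 183);
+       assert_eq!(commit_tx_fee_msat(253, 0, &features), 183_000);
+}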
 
-                               channel_type,
-                               channel_keys_id,
+// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
+// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
+// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
+// inbound channel.
+//
+// Holder designates channel data owned for the benefit of the user client.
+// Counterparty designates channel data owned by the other channel participant entity.
+pub(super) struct Channel<Signer: ChannelSigner> {
+       pub context: ChannelContext<Signer>,
+}
 
-                               pending_monitor_updates: Vec::new(),
-                       }
-               })
-       }
+#[cfg(any(test, fuzzing))]
+struct CommitmentTxInfoCached {
+       fee: u64,
+       total_pending_htlcs: usize,
+       next_holder_htlc_id: u64,
+       next_counterparty_htlc_id: u64,
+       feerate: u32,
+}
 
+impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
                feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
                -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
@@ -1806,671 +2077,326 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(())
        }
 
-       /// Creates a new channel from a remote sides' request for one.
-       /// Assumes chain_hash has already been checked and corresponds with what we expect!
-       pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
-               fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
-               counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
-               their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
-               current_chain_height: u32, logger: &L, outbound_scid_alias: u64
-       ) -> Result<Channel<Signer>, ChannelError>
-               where ES::Target: EntropySource,
-                         SP::Target: SignerProvider<Signer = Signer>,
-                         F::Target: FeeEstimator,
-                         L::Target: Logger,
-       {
-               let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
+       #[inline]
+       fn get_closing_scriptpubkey(&self) -> Script {
+               // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+               // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+               // outside of those situations will fail.
+               self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+       }
 
-               // First check the channel type is known, failing before we do anything else if we don't
-               // support this channel type.
-               let channel_type = if let Some(channel_type) = &msg.channel_type {
-                       if channel_type.supports_any_optional_bits() {
-                               return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
-                       }
+       #[inline]
+       fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+               let mut ret =
+               (4 +                                                   // version
+                1 +                                                   // input count
+                36 +                                                  // prevout
+                1 +                                                   // script length (0)
+                4 +                                                   // sequence
+                1 +                                                   // output count
+                4                                                     // lock time
+                )*4 +                                                 // * 4 for non-witness parts
+               2 +                                                    // witness marker and flag
+               1 +                                                    // witness element count
+               4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
+               self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+               2*(1 + 71);                                            // two signatures + sighash type flags
+               if let Some(spk) = a_scriptpubkey {
+                       ret += ((8+1) +                                    // output values and script length
+                               spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
+               }
+               if let Some(spk) = b_scriptpubkey {
+                       ret += ((8+1) +                                    // output values and script length
+                               spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
+               }
+               ret
+       }
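+
+       // Worked example (illustrative): the 2-of-2 funding witness script is 71 bytes
+       // (OP_2 <33-byte key> <33-byte key> OP_2 OP_CHECKMULTISIG), so the fixed portion above is
+       // 51*4 + 2 + 1 + 4 + 71 + 2*72 = 426 weight units, and each P2WPKH output (22-byte
+       // scriptpubkey) adds (8 + 1 + 22) * 4 = 124, i.e. 674 WU for a typical two-output close.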
 
-                       // We only support the channel types defined by the `ChannelManager` in
-                       // `provided_channel_type_features`. The channel type must always support
-                       // `static_remote_key`.
-                       if !channel_type.requires_static_remote_key() {
-                               return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
-                       }
-                       // Make sure we support all of the features behind the channel type.
-                       if !channel_type.is_subset(our_supported_features) {
-                               return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
-                       }
-                       if channel_type.requires_scid_privacy() && announced_channel {
-                               return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
-                       }
-                       channel_type.clone()
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       channel_type
-               };
-               let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
+       #[inline]
+       fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+               assert!(self.context.pending_inbound_htlcs.is_empty());
+               assert!(self.context.pending_outbound_htlcs.is_empty());
+               assert!(self.context.pending_update_fee.is_none());
 
-               let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
-               let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
-               let pubkeys = holder_signer.pubkeys().clone();
-               let counterparty_pubkeys = ChannelPublicKeys {
-                       funding_pubkey: msg.funding_pubkey,
-                       revocation_basepoint: msg.revocation_basepoint,
-                       payment_point: msg.payment_point,
-                       delayed_payment_basepoint: msg.delayed_payment_basepoint,
-                       htlc_basepoint: msg.htlc_basepoint
-               };
+               let mut total_fee_satoshis = proposed_total_fee_satoshis;
+               let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+               let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
 
-               if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+               if value_to_holder < 0 {
+                       assert!(self.context.is_outbound());
+                       total_fee_satoshis += (-value_to_holder) as u64;
+               } else if value_to_counterparty < 0 {
+                       assert!(!self.context.is_outbound());
+                       total_fee_satoshis += (-value_to_counterparty) as u64;
                }
 
-               // Check sanity of message fields:
-               if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
-                       return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
-               }
-               if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > msg.funding_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
-               }
-               let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
-               if msg.push_msat > full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
-               }
-               if msg.dust_limit_satoshis > msg.funding_satoshis {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
-               }
-               if msg.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+               if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
+                       value_to_counterparty = 0;
                }
-               Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
 
-               let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
-               if msg.to_self_delay > max_counterparty_selected_contest_delay {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
-               }
-               if msg.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
-               }
-               if msg.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+               if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
+                       value_to_holder = 0;
                }
 
-               // Now check against optional parameters as set by config...
-               if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
-                       return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
-               }
-               if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
-               }
-               if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
-               }
-               if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
-               }
-               if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
-               }
-               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
+               assert!(self.context.shutdown_scriptpubkey.is_some());
+               let holder_shutdown_script = self.get_closing_scriptpubkey();
+               let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
+               let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
 
-               // Convert things into internal flags and prep our state:
+               let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+               (closing_transaction, total_fee_satoshis)
+       }
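+
+       // Worked example (illustrative): on a 1_000_000 sat channel where we are the funder and
+       // value_to_self_msat = 600_000_000, a proposed total fee of 1_000 sats yields
+       // value_to_holder = 599_000 and value_to_counterparty = 400_000: the funder alone pays
+       // the closing fee, and either output is dropped entirely if at or below our dust limit.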
 
-               if config.channel_handshake_limits.force_announced_channel_preference {
-                       if config.channel_handshake_config.announced_channel != announced_channel {
-                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
-                       }
-               }
+       fn funding_outpoint(&self) -> OutPoint {
+               self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+       }
 
-               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
-               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       // Protocol level safety check in place, although it should never happen because
-                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
-               }
-               if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
-                               msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
-               }
-               if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+       /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+       /// entirely.
+       ///
+       /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+       /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+       ///
+       /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+       /// disconnected).
+       pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+               (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+       where L::Target: Logger {
+               // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+               // (see equivalent if condition there).
+               assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+               let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+               let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+               self.context.latest_monitor_update_id = mon_update_id;
+               if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+                       assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
                }
+       }
 
-               // check if the funder's amount for the initial commitment tx is sufficient
-               // for full fee payment plus a few HTLCs to ensure the channel will be useful.
-               let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
-               let commitment_tx_fee = Self::commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
-               if funders_amount_msat / 1000 < commitment_tx_fee {
-                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+               // Either ChannelReady got set (which means it won't be unset) or there is no way any
+               // caller thought we could have something claimed (because we wouldn't have accepted an
+               // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+               // either.
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
                }
+               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
-               let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
-               // While it's reasonable for us to not meet the channel reserve initially (if they don't
-               // want to push much to us), our counterparty should always have more than our reserve.
-               if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
-               }
+               let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
 
-               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
-                       match &msg.shutdown_scriptpubkey {
-                               &Some(ref script) => {
-                                       // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
-                                       if script.len() == 0 {
-                                               None
-                                       } else {
-                                               if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+               // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+               // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+               // these, but for now we just have to treat them as normal.
+
+               let mut pending_idx = core::usize::MAX;
+               let mut htlc_value_msat = 0;
+               for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+                       if htlc.htlc_id == htlc_id_arg {
+                               assert_eq!(htlc.payment_hash, payment_hash_calc);
+                               match htlc.state {
+                                       InboundHTLCState::Committed => {},
+                                       InboundHTLCState::LocalRemoved(ref reason) => {
+                                               if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+                                               } else {
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
+                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
-                                               Some(script.clone())
+                                               return UpdateFulfillFetch::DuplicateClaim {};
+                                       },
+                                       _ => {
+                                               debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+                                               // Don't return in release mode here so that we can update channel_monitor
                                        }
-                               },
-                               // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-                               &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
+                               pending_idx = idx;
+                               htlc_value_msat = htlc.amount_msat;
+                               break;
                        }
-               } else { None };
+               }
+               if pending_idx == core::usize::MAX {
+                       #[cfg(any(test, fuzzing))]
+                       // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+                       // this is simply a duplicate claim, not previously failed and we lost funds.
+                       debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                       return UpdateFulfillFetch::DuplicateClaim {};
+               }
 
-               let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-                       match signer_provider.get_shutdown_scriptpubkey() {
-                               Ok(scriptpubkey) => Some(scriptpubkey),
-                               Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+               // Now update local state:
+               //
+               // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+               // can claim it even if the channel hits the chain before we see their next commitment.
+               self.context.latest_monitor_update_id += 1;
+               let monitor_update = ChannelMonitorUpdate {
+                       update_id: self.context.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+                               payment_preimage: payment_preimage_arg.clone(),
+                       }],
+               };
+
+               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+                       // Note that this condition is the same as the assertion in
+                       // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+                       // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+                       // do not get into this branch.
+                       for pending_update in self.context.holding_cell_htlc_updates.iter() {
+                               match pending_update {
+                                       &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+                                               if htlc_id_arg == htlc_id {
+                                                       // Make sure we don't leave latest_monitor_update_id incremented here:
+                                                       self.context.latest_monitor_update_id -= 1;
+                                                       #[cfg(any(test, fuzzing))]
+                                                       debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+                                                       return UpdateFulfillFetch::DuplicateClaim {};
+                                               }
+                                       },
+                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+                                               if htlc_id_arg == htlc_id {
+                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+                                                       // TODO: We may actually be able to switch to a fulfill here, though it's
+                                                       // rare enough it may not be worth the complexity burden.
+                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+                                                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+                                               }
+                                       },
+                                       _ => {}
+                               }
                        }
-               } else { None };
+                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+                               payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+                       });
+                       #[cfg(any(test, fuzzing))]
+                       self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+               }
+               #[cfg(any(test, fuzzing))]
+               self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
 
-               if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
-                       if !shutdown_scriptpubkey.is_compatible(&their_features) {
-                               return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+               {
+                       let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+                       if let InboundHTLCState::Committed = htlc.state {
+                       } else {
+                               debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+                               return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
                        }
+                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+                       htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
 
-               let destination_script = match signer_provider.get_destination_script() {
-                       Ok(script) => script,
-                       Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
-               };
+               UpdateFulfillFetch::NewClaim {
+                       monitor_update,
+                       htlc_value_msat,
+                       msg: Some(msgs::UpdateFulfillHTLC {
+                               channel_id: self.context.channel_id(),
+                               htlc_id: htlc_id_arg,
+                               payment_preimage: payment_preimage_arg,
+                       }),
+               }
+       }
 
-               let mut secp_ctx = Secp256k1::new();
-               secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+               let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
+               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+                               // Even if we aren't supposed to let new monitor updates with commitment state
+                               // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+                               // matter what. Sadly, to push a new monitor update which flies before others
+                               // already queued, we have to insert it into the pending queue and update the
+                               // update_ids of all the following monitors.
+                               if release_cs_monitor && msg.is_some() {
+                                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                                       // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
+                                       // to be strictly increasing by one, so decrement it here.
+                                       self.context.latest_monitor_update_id = monitor_update.update_id;
+                                       monitor_update.updates.append(&mut additional_update.updates);
+                               } else {
+                                       let new_mon_id = self.context.blocked_monitor_updates.get(0)
+                                               .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+                                       monitor_update.update_id = new_mon_id;
+                                       for held_update in self.context.blocked_monitor_updates.iter_mut() {
+                                               held_update.update.update_id += 1;
+                                       }
+                                       if msg.is_some() {
+                                               debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+                                               let update = self.build_commitment_no_status_check(logger);
+                                               self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                                                       update,
+                                               });
+                                       }
+                               }
 
-               let chan = Channel {
-                       context: ChannelContext {
-                               user_id,
+                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+                               UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
+                       },
+                       UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+               }
+       }
 
-                               config: LegacyChannelConfig {
-                                       options: config.channel_config.clone(),
-                                       announced_channel,
-                                       commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
-                               },
+       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+       /// before we fail backwards.
+       ///
+       /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+       /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+       /// [`ChannelError::Ignore`].
+       pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+       -> Result<(), ChannelError> where L::Target: Logger {
+               self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+       }
 
-                               prev_config: None,
+       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+       /// before we fail backwards.
+       ///
+       /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+       /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+       /// [`ChannelError::Ignore`].
+       fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+       -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       panic!("Was asked to fail an HTLC when channel was not in an operational state");
+               }
+               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
-                               inbound_handshake_limits_override: None,
-
-                               temporary_channel_id: Some(msg.temporary_channel_id),
-                               channel_id: msg.temporary_channel_id,
-                               channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
-                               announcement_sigs_state: AnnouncementSigsState::NotSent,
-                               secp_ctx,
-
-                               latest_monitor_update_id: 0,
-
-                               holder_signer,
-                               shutdown_scriptpubkey,
-                               destination_script,
-
-                               cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
-                               cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
-                               value_to_self_msat: msg.push_msat,
-
-                               pending_inbound_htlcs: Vec::new(),
-                               pending_outbound_htlcs: Vec::new(),
-                               holding_cell_htlc_updates: Vec::new(),
-                               pending_update_fee: None,
-                               holding_cell_update_fee: None,
-                               next_holder_htlc_id: 0,
-                               next_counterparty_htlc_id: 0,
-                               update_time_counter: 1,
-
-                               resend_order: RAACommitmentOrder::CommitmentFirst,
-
-                               monitor_pending_channel_ready: false,
-                               monitor_pending_revoke_and_ack: false,
-                               monitor_pending_commitment_signed: false,
-                               monitor_pending_forwards: Vec::new(),
-                               monitor_pending_failures: Vec::new(),
-                               monitor_pending_finalized_fulfills: Vec::new(),
-
-                               #[cfg(debug_assertions)]
-                               holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
-                               #[cfg(debug_assertions)]
-                               counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
-
-                               last_sent_closing_fee: None,
-                               pending_counterparty_closing_signed: None,
-                               closing_fee_limits: None,
-                               target_closing_feerate_sats_per_kw: None,
-
-                               inbound_awaiting_accept: true,
-
-                               funding_tx_confirmed_in: None,
-                               funding_tx_confirmation_height: 0,
-                               short_channel_id: None,
-                               channel_creation_height: current_chain_height,
-
-                               feerate_per_kw: msg.feerate_per_kw,
-                               channel_value_satoshis: msg.funding_satoshis,
-                               counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
-                               holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
-                               counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
-                               holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
-                               counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
-                               holder_selected_channel_reserve_satoshis,
-                               counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
-                               holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
-                               counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
-                               holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
-                               minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
-
-                               counterparty_forwarding_info: None,
-
-                               channel_transaction_parameters: ChannelTransactionParameters {
-                                       holder_pubkeys: pubkeys,
-                                       holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
-                                       is_outbound_from_holder: false,
-                                       counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
-                                               selected_contest_delay: msg.to_self_delay,
-                                               pubkeys: counterparty_pubkeys,
-                                       }),
-                                       funding_outpoint: None,
-                                       opt_anchors: if opt_anchors { Some(()) } else { None },
-                                       opt_non_zero_fee_anchors: None
-                               },
-                               funding_transaction: None,
-
-                               counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
-                               counterparty_prev_commitment_point: None,
-                               counterparty_node_id,
-
-                               counterparty_shutdown_scriptpubkey,
-
-                               commitment_secrets: CounterpartyCommitmentSecrets::new(),
-
-                               channel_update_status: ChannelUpdateStatus::Enabled,
-                               closing_signed_in_flight: false,
-
-                               announcement_sigs: None,
-
-                               #[cfg(any(test, fuzzing))]
-                               next_local_commitment_tx_fee_info_cached: Mutex::new(None),
-                               #[cfg(any(test, fuzzing))]
-                               next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
-
-                               workaround_lnd_bug_4006: None,
-                               sent_message_awaiting_response: None,
-
-                               latest_inbound_scid_alias: None,
-                               outbound_scid_alias,
-
-                               channel_pending_event_emitted: false,
-                               channel_ready_event_emitted: false,
-
-                               #[cfg(any(test, fuzzing))]
-                               historical_inbound_htlc_fulfills: HashSet::new(),
-
-                               channel_type,
-                               channel_keys_id,
-
-                               pending_monitor_updates: Vec::new(),
-                       }
-               };
-
-               Ok(chan)
-       }
-
-       #[inline]
-       fn get_closing_scriptpubkey(&self) -> Script {
-               // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
-               // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
-               // outside of those situations will fail.
-               self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
-       }
-
-       #[inline]
-       fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
-               let mut ret =
-               (4 +                                                   // version
-                1 +                                                   // input count
-                36 +                                                  // prevout
-                1 +                                                   // script length (0)
-                4 +                                                   // sequence
-                1 +                                                   // output count
-                4                                                     // lock time
-                )*4 +                                                 // * 4 for non-witness parts
-               2 +                                                    // witness marker and flag
-               1 +                                                    // witness element count
-               4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
-               self.context.get_funding_redeemscript().len() as u64 + // funding witness script
-               2*(1 + 71);                                            // two signatures + sighash type flags
-               if let Some(spk) = a_scriptpubkey {
-                       ret += ((8+1) +                                    // output values and script length
-                               spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
-               }
-               if let Some(spk) = b_scriptpubkey {
-                       ret += ((8+1) +                                    // output values and script length
-                               spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
-               }
-               ret
-       }
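-
-       // Worked example (illustrative, not from the source): with the usual 71-byte
-       // 2-of-2 multisig funding redeemscript, the fixed portion above weighs
-       // (4+1+36+1+4+1+4)*4 + 2 + 1 + 4 + 71 + 2*(1+71) = 426 weight units, and each
-       // P2WPKH shutdown script (22 bytes) adds ((8+1)+22)*4 = 124, so a closing
-       // transaction paying both sides weighs 426 + 2*124 = 674 weight units.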
-
-       #[inline]
-       fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
-               assert!(self.context.pending_inbound_htlcs.is_empty());
-               assert!(self.context.pending_outbound_htlcs.is_empty());
-               assert!(self.context.pending_update_fee.is_none());
-
-               let mut total_fee_satoshis = proposed_total_fee_satoshis;
-               let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
-               let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
-
-               if value_to_holder < 0 {
-                       assert!(self.context.is_outbound());
-                       total_fee_satoshis += (-value_to_holder) as u64;
-               } else if value_to_counterparty < 0 {
-                       assert!(!self.context.is_outbound());
-                       total_fee_satoshis += (-value_to_counterparty) as u64;
-               }
-
-               if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
-                       value_to_counterparty = 0;
-               }
-
-               if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
-                       value_to_holder = 0;
-               }
-
-               assert!(self.context.shutdown_scriptpubkey.is_some());
-               let holder_shutdown_script = self.get_closing_scriptpubkey();
-               let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
-               let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
-
-               let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
-               (closing_transaction, total_fee_satoshis)
-       }
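-
-       // Illustrative numbers (assumed, not from the source): only the funder pays
-       // the closing fee. In a 1_000_000 sat channel with value_to_self_msat =
-       // 300_000_000 and a proposed fee of 1_000 sats, an outbound (funder) holder
-       // receives 300_000 - 1_000 = 299_000 sats and the counterparty 700_000; for
-       // an inbound channel the fee is instead deducted from value_to_counterparty.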
-
-       fn funding_outpoint(&self) -> OutPoint {
-               self.context.channel_transaction_parameters.funding_outpoint.unwrap()
-       }
-
-       /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
-       /// entirely.
-       ///
-       /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
-       /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
-       ///
-       /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
-       /// disconnected).
-       pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
-               (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
-       where L::Target: Logger {
-               // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
-               // (see equivalent if condition there).
-               assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
-               let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
-               let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
-               self.context.latest_monitor_update_id = mon_update_id;
-               if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
-                       assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
-               }
-       }
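-
-       // Sketch of the save/restore above (illustrative ids): because the monitor
-       // update is intentionally dropped, `latest_monitor_update_id` must not move:
-       //
-       //     let saved = self.context.latest_monitor_update_id;          // e.g. 7
-       //     let _ = self.get_update_fulfill_htlc(/* args */);           // would bump it to 8
-       //     self.context.latest_monitor_update_id = saved;              // restored to 7, no gap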
-
-       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
-               // Either ChannelReady got set (which means it won't be unset) or there is no way any
-               // caller thought we could have something claimed (because we wouldn't have accepted an
-               // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
-               // either.
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
-               }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
-               let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
-
-               // ChannelManager may generate duplicate claims/fails due to HTLC update events from
-               // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
-               // these, but for now we just have to treat them as normal.
+               // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+               // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+               // these, but for now we just have to treat them as normal.
 
                let mut pending_idx = core::usize::MAX;
-               let mut htlc_value_msat = 0;
                for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
                        if htlc.htlc_id == htlc_id_arg {
-                               assert_eq!(htlc.payment_hash, payment_hash_calc);
                                match htlc.state {
                                        InboundHTLCState::Committed => {},
                                        InboundHTLCState::LocalRemoved(ref reason) => {
                                                if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
                                                } else {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
-                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+                                                       debug_assert!(false, "Tried to fail an HTLC that was already failed");
                                                }
-                                               return UpdateFulfillFetch::DuplicateClaim {};
+                                               return Ok(None);
                                        },
                                        _ => {
                                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                                               // Don't return in release mode here so that we can update channel_monitor
+                                               return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
                                        }
                                }
                                pending_idx = idx;
-                               htlc_value_msat = htlc.amount_msat;
-                               break;
                        }
                }
                if pending_idx == core::usize::MAX {
                        #[cfg(any(test, fuzzing))]
-                       // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
-                       // this is simply a duplicate claim, not previously failed and we lost funds.
+                       // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+                       // is simply a duplicate fail, not previously failed and we failed-back too early.
                        debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-                       return UpdateFulfillFetch::DuplicateClaim {};
-               }
-
-               // Now update local state:
-               //
-               // We have to put the payment_preimage in the channel_monitor right away here to ensure we
-               // can claim it even if the channel hits the chain before we see their next commitment.
-               self.context.latest_monitor_update_id += 1;
-               let monitor_update = ChannelMonitorUpdate {
-                       update_id: self.context.latest_monitor_update_id,
-                       updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-                               payment_preimage: payment_preimage_arg.clone(),
-                       }],
-               };
-
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
-                       // Note that this condition is the same as the assertion in
-                       // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
-                       // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
-                       // do not get into this branch.
-                       for pending_update in self.context.holding_cell_htlc_updates.iter() {
-                               match pending_update {
-                                       &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
-                                               if htlc_id_arg == htlc_id {
-                                                       // Make sure we don't leave latest_monitor_update_id incremented here:
-                                                       self.context.latest_monitor_update_id -= 1;
-                                                       #[cfg(any(test, fuzzing))]
-                                                       debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-                                                       return UpdateFulfillFetch::DuplicateClaim {};
-                                               }
-                                       },
-                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
-                                               if htlc_id_arg == htlc_id {
-                                                       log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
-                                                       // TODO: We may actually be able to switch to a fulfill here, though it's
-                                                       // rare enough it may not be worth the complexity burden.
-                                                       debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-                                                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
-                                               }
-                                       },
-                                       _ => {}
-                               }
-                       }
-                       log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
-                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
-                               payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
-                       });
-                       #[cfg(any(test, fuzzing))]
-                       self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-                       return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
-               }
-               #[cfg(any(test, fuzzing))]
-               self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-
-               {
-                       let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
-                       if let InboundHTLCState::Committed = htlc.state {
-                       } else {
-                               debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                               return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
-                       }
-                       log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
-                       htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
-               }
-
-               UpdateFulfillFetch::NewClaim {
-                       monitor_update,
-                       htlc_value_msat,
-                       msg: Some(msgs::UpdateFulfillHTLC {
-                               channel_id: self.context.channel_id(),
-                               htlc_id: htlc_id_arg,
-                               payment_preimage: payment_preimage_arg,
-                       }),
-               }
-       }
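-
-       // Decision sketch for callers (variant names as above; illustrative only):
-       //
-       //     match fetch {
-       //         UpdateFulfillFetch::NewClaim { msg: Some(_), .. } =>
-       //             { /* send the update_fulfill_htlc to the peer now */ },
-       //         UpdateFulfillFetch::NewClaim { msg: None, .. } =>
-       //             { /* the claim was parked in the holding cell */ },
-       //         UpdateFulfillFetch::DuplicateClaim {} =>
-       //             { /* already claimed; nothing further to do */ },
-       //     }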
-
-       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
-               let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
-               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-                       UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
-                               // Even if we aren't supposed to let new monitor updates with commitment state
-                               // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
-                               // matter what. Sadly, to push a new monitor update which flies before others
-                               // already queued, we have to insert it into the pending queue and update the
-                               // update_ids of all the following monitors.
-                               let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
-                                       let mut additional_update = self.build_commitment_no_status_check(logger);
-                                       // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
-                                       // to be strictly increasing by one, so decrement it here.
-                                       self.context.latest_monitor_update_id = monitor_update.update_id;
-                                       monitor_update.updates.append(&mut additional_update.updates);
-                                       self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       self.context.pending_monitor_updates.len() - 1
-                               } else {
-                                       let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
-                                               .unwrap_or(self.context.pending_monitor_updates.len());
-                                       let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
-                                               .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
-                                       monitor_update.update_id = new_mon_id;
-                                       self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
-                                               update: monitor_update, blocked: false,
-                                       });
-                                       for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
-                                               held_update.update.update_id += 1;
-                                       }
-                                       if msg.is_some() {
-                                               debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
-                                               let update = self.build_commitment_no_status_check(logger);
-                                               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                                                       update, blocked: true,
-                                               });
-                                       }
-                                       insert_pos
-                               };
-                               self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
-                               UpdateFulfillCommitFetch::NewClaim {
-                                       monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
-                                               .expect("We just pushed the monitor update").update,
-                                       htlc_value_msat,
-                               }
-                       },
-                       UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
-               }
-       }
-
-       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
-       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
-       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
-       /// before we fail backwards.
-       ///
-       /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
-       /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
-       /// [`ChannelError::Ignore`].
-       pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
-       -> Result<(), ChannelError> where L::Target: Logger {
-               self.fail_htlc(htlc_id_arg, err_packet, true, logger)
-                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
-       }
-
-       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
-       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
-       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
-       /// before we fail backwards.
-       ///
-       /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
-       /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
-       /// [`ChannelError::Ignore`].
-       fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-       -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       panic!("Was asked to fail an HTLC when channel was not in an operational state");
-               }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
-               // ChannelManager may generate duplicate claims/fails due to HTLC update events from
-               // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
-               // these, but for now we just have to treat them as normal.
-
-               let mut pending_idx = core::usize::MAX;
-               for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
-                       if htlc.htlc_id == htlc_id_arg {
-                               match htlc.state {
-                                       InboundHTLCState::Committed => {},
-                                       InboundHTLCState::LocalRemoved(ref reason) => {
-                                               if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
-                                               } else {
-                                                       debug_assert!(false, "Tried to fail an HTLC that was already failed");
-                                               }
-                                               return Ok(None);
-                                       },
-                                       _ => {
-                                               debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                                               return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
-                                       }
-                               }
-                               pending_idx = idx;
-                       }
-               }
-               if pending_idx == core::usize::MAX {
-                       #[cfg(any(test, fuzzing))]
-                       // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
-                       // is simply a duplicate fail, not previously failed and we failed-back too early.
-                       debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-                       return Ok(None);
-               }
-
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
-                       debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
-                       force_holding_cell = true;
+                       return Ok(None);
+               }
+
+               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+                       debug_assert!(force_holding_cell, "fail_htlc is only called with !force_holding_cell when emptying the holding cell, so we shouldn't end up back in it!");
+                       force_holding_cell = true;
                }
 
                // Now update local state:
@@ -2516,285 +2442,33 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        // Message handlers:
 
-       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
-               let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
-               // Check sanity of message fields:
+       /// Handles a `funding_signed` message from the remote end.
+       /// If this call is successful, broadcast the funding transaction (and not before!)
+       pub fn funding_signed<SP: Deref, L: Deref>(
+               &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+       ) -> Result<ChannelMonitor<Signer>, ChannelError>
+       where
+               SP::Target: SignerProvider<Signer = Signer>,
+               L::Target: Logger
+       {
                if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
-               }
-               if self.context.channel_state != ChannelState::OurInitSent as u32 {
-                       return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
-               }
-               if msg.dust_limit_satoshis > 21000000 * 100000000 {
-                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
-               }
-               if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
-                               msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
-               if msg.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
-               }
-               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
-               if msg.to_self_delay > max_delay_acceptable {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
-               if msg.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+               if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
+                       return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
                }
-               if msg.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
-               // Now check against optional parameters as set by config...
-               if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
-               }
-               if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
-               }
-               if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
-               }
-               if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
-               }
-               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.minimum_depth > peer_limits.max_minimum_depth {
-                       return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
-               }
+               let funding_script = self.context.get_funding_redeemscript();
 
-               if let Some(ty) = &msg.channel_type {
-                       if *ty != self.context.channel_type {
-                               return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
-                       }
-               } else if their_features.supports_channel_type() {
-                       // Assume they've accepted the channel type as they said they understand it.
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       self.context.channel_type = channel_type;
-               }
-
-               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
-                       match &msg.shutdown_scriptpubkey {
-                               &Some(ref script) => {
-                                       // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
-                                       if script.len() == 0 {
-                                               None
-                                       } else {
-                                               if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
-                                               }
-                                               Some(script.clone())
-                                       }
-                               },
-                               // Peer is signaling upfront shutdown but doesn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
-                               &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
-                               }
-                       }
-               } else { None };
-
-               self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
-               self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
-               self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
-               self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
-               self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
-
-               if peer_limits.trust_own_funding_0conf {
-                       self.context.minimum_depth = Some(msg.minimum_depth);
-               } else {
-                       self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
-               }
-
-               let counterparty_pubkeys = ChannelPublicKeys {
-                       funding_pubkey: msg.funding_pubkey,
-                       revocation_basepoint: msg.revocation_basepoint,
-                       payment_point: msg.payment_point,
-                       delayed_payment_basepoint: msg.delayed_payment_basepoint,
-                       htlc_basepoint: msg.htlc_basepoint
-               };
-
-               self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
-                       selected_contest_delay: msg.to_self_delay,
-                       pubkeys: counterparty_pubkeys,
-               });
-
-               self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
-               self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
-               self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
-               self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
-               Ok(())
-       }
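-
-       // Illustrative example (assumed values): with peer_limits.max_minimum_depth = 6,
-       // an accept_channel carrying minimum_depth = 144 fails the check above and the
-       // channel is closed with ChannelError::Close before any funds are committed.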
-
-       fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
-               let funding_script = self.context.get_funding_redeemscript();
-
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
-               {
-                       let trusted_tx = initial_commitment_tx.trust();
-                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
-                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-                       // They sign the holder commitment transaction...
-                       log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
-                               log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
-                               encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
-                               encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
-                       secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
-               }
-
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-
-               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
-               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
-                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
-
-               let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
-
-               // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
-               Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
-       }
-
-       pub fn funding_created<SP: Deref, L: Deref>(
-               &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
-       where
-               SP::Target: SignerProvider<Signer = Signer>,
-               L::Target: Logger
-       {
-               if self.context.is_outbound() {
-                       return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
-               }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
-                       // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
-                       // remember the channel, so it's safe to just send an error_message here and drop the
-                       // channel.
-                       return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
-               }
-               if self.context.inbound_awaiting_accept {
-                       return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
-               }
-               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-               }
-
-               let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
-               self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
-               // This is an externally observable change before we finish all our checks.  In particular
-               // funding_created_signature may fail.
-               self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-
-               let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
-                       Ok(res) => res,
-                       Err(ChannelError::Close(e)) => {
-                               self.context.channel_transaction_parameters.funding_outpoint = None;
-                               return Err(ChannelError::Close(e));
-                       },
-                       Err(e) => {
-                               // The only error we know how to handle is ChannelError::Close, so we fall over here
-                               // to make sure we don't continue with an inconsistent state.
-                               panic!("unexpected error type from funding_created_signature {:?}", e);
-                       }
-               };
-
-               let holder_commitment_tx = HolderCommitmentTransaction::new(
-                       initial_commitment_tx,
-                       msg.signature,
-                       Vec::new(),
-                       &self.context.get_holder_pubkeys().funding_pubkey,
-                       self.context.counterparty_funding_pubkey()
-               );
-
-               self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
-               // Now that we're past error-generating stuff, update our local state:
-
-               let funding_redeemscript = self.context.get_funding_redeemscript();
-               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
-               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
-               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
-               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
-               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
-               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
-                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
-                                                         &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
-                                                         &self.context.channel_transaction_parameters,
-                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
-                                                         obscure_factor,
-                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
-               channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
-
-               self.context.channel_state = ChannelState::FundingSent as u32;
-               self.context.channel_id = funding_txo.to_channel_id();
-               self.context.cur_counterparty_commitment_transaction_number -= 1;
-               self.context.cur_holder_commitment_transaction_number -= 1;
-
-               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
-
-               let need_channel_ready = self.check_get_channel_ready(0).is_some();
-               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
-
-               Ok((msgs::FundingSigned {
-                       channel_id: self.context.channel_id,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-               }, channel_monitor))
-       }
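-
-       // Protocol-flow sketch (standard BOLT 2 ordering, for orientation only):
-       //
-       //     open_channel -> accept_channel -> funding_created -> funding_signed
-       //
-       // funding_created is handled by the non-initiator, which instantiates its
-       // ChannelMonitor before replying with the funding_signed built above.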
-
-       /// Handles a funding_signed message from the remote end.
-       /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<SP: Deref, L: Deref>(
-               &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<ChannelMonitor<Signer>, ChannelError>
-       where
-               SP::Target: SignerProvider<Signer = Signer>,
-               L::Target: Logger
-       {
-               if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
-               }
-               if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
-                       return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
-               }
-               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-               }
-
-               let funding_script = self.context.get_funding_redeemscript();
-
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
-               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
 
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
                        log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
@@ -2924,1343 +2598,937 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
        }
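// Sketch of the `channel_state` mask check in funding_signed above. The flag
// values here are illustrative, not the crate's actual ChannelState bits: the
// message is only valid in FundingCreated, possibly with the
// MonitorUpdateInProgress bit also set, so that bit is masked out before
// comparing.
const FUNDING_CREATED: u32 = 1 << 2;
const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;

fn funding_signed_state_ok(channel_state: u32) -> bool {
    channel_state & !MONITOR_UPDATE_IN_PROGRESS == FUNDING_CREATED
}

fn main() {
    assert!(funding_signed_state_ok(FUNDING_CREATED));
    assert!(funding_signed_state_ok(FUNDING_CREATED | MONITOR_UPDATE_IN_PROGRESS));
    // Any other state bit means "strange state" and the channel is closed.
    assert!(!funding_signed_state_ok(FUNDING_CREATED | (1 << 7)));
}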
 
-       /// Returns the transaction if there is a pending funding transaction that has yet to be broadcast.
-       pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
-               if self.context.channel_state & (ChannelState::FundingCreated as u32) != 0 {
-                       self.context.funding_transaction.clone()
-               } else {
-                       None
+       pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+               &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+               create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+               FE::Target: FeeEstimator, L::Target: Logger,
+       {
+               // We can't accept HTLCs sent after we've sent a shutdown.
+               let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+               if local_sent_shutdown {
+                       pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
                }
-       }
-
-       /// Returns an HTLCStats about pending inbound HTLCs.
-       fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
-               let context = &self.context;
-               let mut stats = HTLCStats {
-                       pending_htlcs: context.pending_inbound_htlcs.len() as u32,
-                       pending_htlcs_value_msat: 0,
-                       on_counterparty_tx_dust_exposure_msat: 0,
-                       on_holder_tx_dust_exposure_msat: 0,
-                       holding_cell_msat: 0,
-                       on_holder_tx_holding_cell_htlcs_count: 0,
-               };
-
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
-                       (0, 0)
-               } else {
-                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
-               };
-               let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
-               let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
-               for ref htlc in context.pending_inbound_htlcs.iter() {
-                       stats.pending_htlcs_value_msat += htlc.amount_msat;
-                       if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
-                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
-                       if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
-                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
+               // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
+               let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+               if remote_sent_shutdown {
+                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+               }
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+               }
+               if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
+                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+               }
+               if msg.amount_msat == 0 {
+                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+               }
+               if msg.amount_msat < self.context.holder_htlc_minimum_msat {
+                       return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
                }
-               stats
-       }
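// Editorial sketch of the dust accounting above, with illustrative numbers
// (the real weights come from htlc_timeout_tx_weight/htlc_success_tx_weight).
// On non-anchor channels an HTLC counts as "dust" on a commitment tx if,
// after paying the second-stage HTLC tx fee, its value would fall under the
// dust limit.
fn is_dust(amount_msat: u64, dust_limit_sats: u64, feerate_per_kw: u64, htlc_tx_weight: u64) -> bool {
    let htlc_tx_fee_sats = feerate_per_kw * htlc_tx_weight / 1000;
    amount_msat / 1000 < dust_limit_sats + htlc_tx_fee_sats
}

fn main() {
    // 546 sat dust limit, 2500 sat/kW feerate, 663 WU timeout tx (illustrative):
    // threshold = 546 + 2500 * 663 / 1000 = 546 + 1657 = 2203 sats.
    assert!(is_dust(2_202_000, 546, 2500, 663));
    assert!(!is_dust(2_204_000, 546, 2500, 663));
}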
 
-       /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
-       fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
-               let context = &self.context;
-               let mut stats = HTLCStats {
-                       pending_htlcs: context.pending_outbound_htlcs.len() as u32,
-                       pending_htlcs_value_msat: 0,
-                       on_counterparty_tx_dust_exposure_msat: 0,
-                       on_holder_tx_dust_exposure_msat: 0,
-                       holding_cell_msat: 0,
-                       on_holder_tx_holding_cell_htlcs_count: 0,
-               };
+               let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+               let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+               if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
+                       return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+               }
+               if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+               }
+               // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
+               // the reserve_satoshis we told them to always have as direct payment so that they lose
+               // something if we punish them for broadcasting an old state).
+               // Note that we don't really care about having a small/no to_remote output in our local
+               // commitment transactions, as the purpose of the channel reserve is to ensure we can
+               // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
+               // present in the next commitment transaction we send them (at least for fulfilled ones,
+               // failed ones won't modify value_to_self).
+               // Note that we will send HTLCs which another instance of rust-lightning would think
+               // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
+               // Channel state once they will not be present in the next received commitment
+               // transaction).
+               let mut removed_outbound_total_msat = 0;
+               for ref htlc in self.context.pending_outbound_htlcs.iter() {
+                       if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                               removed_outbound_total_msat += htlc.amount_msat;
+                       } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                               removed_outbound_total_msat += htlc.amount_msat;
+                       }
+               }
 
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.opt_anchors() {
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
-                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
+                       let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+                       (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+                               dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
                };
-               let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
-               let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
-               for ref htlc in context.pending_outbound_htlcs.iter() {
-                       stats.pending_htlcs_value_msat += htlc.amount_msat;
-                       if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
-                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
-                       if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
-                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+               let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+               if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+                       let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+                       if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+                                       on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
 
-               for update in context.holding_cell_htlc_updates.iter() {
-                       if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
-                               stats.pending_htlcs += 1;
-                               stats.pending_htlcs_value_msat += amount_msat;
-                               stats.holding_cell_msat += amount_msat;
-                               if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
-                                       stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
-                               }
-                               if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
-                                       stats.on_holder_tx_dust_exposure_msat += amount_msat;
-                               } else {
-                                       stats.on_holder_tx_holding_cell_htlcs_count += 1;
-                               }
+               let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+               if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+                       let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+                       if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+                                       on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                }
-               stats
-       }
-
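// Sketch of the dust-exposure gate above: if accepting this HTLC as dust
// would push total dust exposure (inbound + outbound) on a commitment tx
// past the configured maximum, the HTLC is failed back with
// temporary_channel_failure (0x1000|7) instead of closing the channel.
fn dust_exposure_exceeded(
    inbound_dust_msat: u64, outbound_dust_msat: u64,
    new_htlc_msat: u64, max_dust_htlc_exposure_msat: u64,
) -> bool {
    inbound_dust_msat + outbound_dust_msat + new_htlc_msat > max_dust_htlc_exposure_msat
}

fn main() {
    // 3M msat already exposed, 2.5M msat incoming, 5M msat cap: fail it back.
    assert!(dust_exposure_exceeded(2_000_000, 1_000_000, 2_500_000, 5_000_000));
    assert!(!dust_exposure_exceeded(2_000_000, 1_000_000, 1_500_000, 5_000_000));
}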
-       /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
-       /// Doesn't bother handling the
-       /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
-       /// corner case properly.
-       pub fn get_available_balances(&self) -> AvailableBalances {
-               let context = &self.context;
-               // Note that we have to handle overflow due to the above case.
-               let inbound_stats = self.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = self.get_outbound_pending_htlc_stats(None);
 
-               let mut balance_msat = context.value_to_self_msat;
-               for ref htlc in context.pending_inbound_htlcs.iter() {
-                       if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
-                               balance_msat += htlc.amount_msat;
-                       }
+               let pending_value_to_self_msat =
+                       self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+               let pending_remote_value_msat =
+                       self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+               if pending_remote_value_msat < msg.amount_msat {
+                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
                }
-               balance_msat -= outbound_stats.pending_htlcs_value_msat;
-
-               let outbound_capacity_msat = context.value_to_self_msat
-                               .saturating_sub(outbound_stats.pending_htlcs_value_msat)
-                               .saturating_sub(
-                                       context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
-
-               let mut available_capacity_msat = outbound_capacity_msat;
 
-               if context.is_outbound() {
-                       // We must account for the channel commitment tx fee when computing how much of the
-                       // available capacity can be used for the next HTLC. This mirrors the logic in send_htlc.
-                       //
-                       // The fee depends on whether the amount we will be sending is above dust or not,
-                       // and the answer will in turn change the amount itself, making it a circular
-                       // dependency.
-                       // This complicates the computation around dust-values, up to the one-htlc-value.
-                       let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
-                       if !context.opt_anchors() {
-                               real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
-                       }
+               // Check that the remote can afford to pay for this HTLC on-chain at the current
+               // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
+               let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
+                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+                       self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
+               };
+               if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
+                       return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+               };
 
-                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
-                       let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
-                       let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
-                       let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+               if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
+                       return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+               }
 
-                       // We will first subtract the fee as if we were above-dust. Then, if the resulting
-                       // value ends up being below dust, we have this fee available again. In that case,
-                       // match the value to right-below-dust.
-                       let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
-                       if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
-                               let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
-                               debug_assert!(one_htlc_difference_msat != 0);
-                               capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
-                               capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
-                               available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
-                       } else {
-                               available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+               if !self.context.is_outbound() {
+                       // The `2 *` and `Some(())` are for the fee spike buffer we keep for the remote. This
+                       // deviates from the spec because in the spec, the fee spike buffer requirement doesn't
+                       // exist on the receiver's side, only on the sender's.
+                       // Note that when we eventually remove support for fee updates and switch to anchor output
+                       // fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But
+                       // keep the extra HTLC when calculating the next remote commitment transaction fee, as we
+                       // should still be able to afford adding this HTLC plus one more future HTLC, regardless of
+                       // being sensitive to fee spikes.
+                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+                       let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+                       if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
+                               // Note that if the pending_forward_status is not updated here, then it's because we're already failing
+                               // the HTLC, i.e. its status is already set to failing.
+                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
+                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
                        }
                } else {
-                       // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
-                       // sending a new HTLC won't reduce their balance below our reserve threshold.
-                       let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
-                       if !context.opt_anchors() {
-                               real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
-                       }
-
-                       let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
-                       let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
-
-                       let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
-                       let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
-                               .saturating_sub(inbound_stats.pending_htlcs_value_msat);
-
-                       if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
-                               // If another HTLC's fee would reduce the remote's balance below the reserve limit
-                               // we've selected for them, we can only send dust HTLCs.
-                               available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+                       // Check that they won't violate our local required channel reserve by adding this HTLC.
+                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+                       let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+                       if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
+                               return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
                        }
                }
-
-               let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
-
-               // If we get close to our maximum dust exposure, we end up in a situation where we can send
-               // between zero and the remaining dust exposure limit, OR above the dust limit.
-               // Because we cannot express this as a simple min/max, we prefer to tell the user they can
-               // send above the dust limit (as the router can always overpay to meet the dust limit).
-               let mut remaining_msat_below_dust_exposure_limit = None;
-               let mut dust_exposure_dust_limit_msat = 0;
-
-               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
-                       (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
-               } else {
-                       let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
-                       (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
-                        context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
-               };
-               let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
-                       remaining_msat_below_dust_exposure_limit =
-                               Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
-                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
+               if self.context.next_counterparty_htlc_id != msg.htlc_id {
+                       return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
                }
-
-               let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
-                       remaining_msat_below_dust_exposure_limit = Some(cmp::min(
-                               remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-                               context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
-                       dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
+               if msg.cltv_expiry >= 500000000 {
+                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
-               if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
-                       if available_capacity_msat < dust_exposure_dust_limit_msat {
-                               available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
-                       } else {
-                               next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
+               if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+                       if let PendingHTLCStatus::Forward(_) = pending_forward_status {
+                               panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
                        }
                }
 
-               available_capacity_msat = cmp::min(available_capacity_msat,
-                       context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+               // Now update local state:
+               self.context.next_counterparty_htlc_id += 1;
+               self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
+                       htlc_id: msg.htlc_id,
+                       amount_msat: msg.amount_msat,
+                       payment_hash: msg.payment_hash,
+                       cltv_expiry: msg.cltv_expiry,
+                       state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+               });
+               Ok(())
+       }
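// Editorial sketch of the fee spike buffer check above: the remote's balance
// left after this HTLC and our reserve must cover twice the projected
// commit-tx fee (computed with one extra buffer HTLC). Inputs are assumed to
// have already passed the overdraw/reserve checks in the diff.
fn violates_fee_spike_buffer(
    pending_remote_value_msat: u64, amount_msat: u64,
    holder_reserve_msat: u64, next_remote_commit_tx_fee_msat: u64,
) -> bool {
    let buffer_msat = 2 * next_remote_commit_tx_fee_msat;
    pending_remote_value_msat - amount_msat - holder_reserve_msat < buffer_msat
}

fn main() {
    // 100k msat remote balance, 60k msat HTLC, 20k msat reserve leaves 20k;
    // a projected 11k msat fee needs a 22k buffer, so the HTLC is failed back.
    assert!(violates_fee_spike_buffer(100_000, 60_000, 20_000, 11_000));
    assert!(!violates_fee_spike_buffer(100_000, 60_000, 20_000, 9_000));
}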
 
-               if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
-                       available_capacity_msat = 0;
+       /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
+       #[inline]
+       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
+               assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
+               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+                       if htlc.htlc_id == htlc_id {
+                               let outcome = match check_preimage {
+                                       None => fail_reason.into(),
+                                       Some(payment_preimage) => {
+                                               let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+                                               if payment_hash != htlc.payment_hash {
+                                                       return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+                                               }
+                                               OutboundHTLCOutcome::Success(Some(payment_preimage))
+                                       }
+                               };
+                               match htlc.state {
+                                       OutboundHTLCState::LocalAnnounced(_) =>
+                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+                                       OutboundHTLCState::Committed => {
+                                               htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
+                                       },
+                                       OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
+                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+                               }
+                               return Ok(htlc);
+                       }
                }
+               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+       }
 
-               AvailableBalances {
-                       inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
-                                       - context.value_to_self_msat as i64
-                                       - self.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
-                                       - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
-                               0) as u64,
-                       outbound_capacity_msat,
-                       next_outbound_htlc_limit_msat: available_capacity_msat,
-                       next_outbound_htlc_minimum_msat,
-                       balance_msat,
+       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+               }
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
+
+               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
        }
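// Minimal sketch of the circular fee/dust adjustment in get_available_balances
// above: first subtract the commit-tx fee as if the next HTLC were above-dust;
// if the remainder lands below the dust threshold, that per-HTLC fee is not
// actually owed, so add it back and cap the result just below the dust limit.
fn next_htlc_capacity_msat(
    available_msat: i64, dust_limit_msat: i64,
    max_fee_msat: i64, // fee reserve assuming the candidate HTLC is above-dust
    min_fee_msat: i64, // fee reserve assuming the candidate HTLC is dust
) -> u64 {
    let mut capacity = available_msat - max_fee_msat;
    if capacity < dust_limit_msat {
        // The HTLC would be dust, so its per-HTLC fee is not charged.
        capacity += max_fee_msat - min_fee_msat;
        capacity = capacity.min(dust_limit_msat - 1);
    }
    capacity.clamp(0, available_msat) as u64
}

fn main() {
    // 10_000 msat available, dust limit 5_000 msat, fees 6_000/4_000 msat:
    // 10_000 - 6_000 = 4_000 < 5_000, so add back 2_000 and cap at 4_999.
    assert_eq!(next_htlc_capacity_msat(10_000, 5_000, 6_000, 4_000), 4_999);
}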
 
-       pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
-               let context = &self.context;
-               (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
-       }
+       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+               }
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+               }
 
-       // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
-       // Note that num_htlcs should not include dust HTLCs.
-       fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
-               // Note that we need to divide before multiplying to round properly,
-               // since the lowest denomination of bitcoin on-chain is the satoshi.
-               (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
+               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+               Ok(())
        }
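// Worked example (editorial) of the divide-before-multiply rounding in
// commit_tx_fee_msat: on-chain fees are paid in whole satoshis, so the
// weight * feerate product is floored to sats before converting back to msat.
fn commit_tx_fee_msat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
    const BASE_WEIGHT: u64 = 724; // non-anchor commitment tx base weight
    const WEIGHT_PER_HTLC: u64 = 172;
    (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw / 1000 * 1000
}

fn main() {
    // (724 + 2 * 172) * 253 = 1068 * 253 = 270_204; /1000 floors to 270 sats,
    // then *1000 = 270_000 msat. Multiplying first would give 270_204 msat,
    // which is not a whole number of satoshis.
    assert_eq!(commit_tx_fee_msat(253, 2), 270_000);
}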
 
-       /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
-       /// number of pending HTLCs that are on track to be in our next commitment tx.
-       ///
-       /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
-       /// `fee_spike_buffer_htlc` is `Some`.
-       ///
-       /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
-       /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
-       ///
-       /// Dust HTLCs are excluded.
-       fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
-               let context = &self.context;
-               assert!(context.is_outbound());
+       pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+               }
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+               }
 
-               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
-                       (0, 0)
-               } else {
-                       (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
-                               context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
-               };
-               let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
-               let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+               Ok(())
+       }
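// Sketch of the preimage check in mark_outbound_htlc_removed above: a fulfill
// is only valid if SHA256(preimage) equals the HTLC's payment hash. This uses
// the bitcoin_hashes crate (<=0.12 API), mirroring the
// `Sha256::hash(..).into_inner()` call in the diff.
use bitcoin_hashes::{sha256, Hash};

fn preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
    sha256::Hash::hash(&preimage[..]).into_inner() == *payment_hash
}

fn main() {
    let preimage = [42u8; 32];
    let payment_hash = sha256::Hash::hash(&preimage[..]).into_inner();
    assert!(preimage_matches(&preimage, &payment_hash));
    assert!(!preimage_matches(&[0u8; 32], &payment_hash));
}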
 
-               let mut addl_htlcs = 0;
-               if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
-               match htlc.origin {
-                       HTLCInitiator::LocalOffered => {
-                               if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
-                                       addl_htlcs += 1;
-                               }
-                       },
-                       HTLCInitiator::RemoteOffered => {
-                               if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
-                                       addl_htlcs += 1;
-                               }
-                       }
+       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+               where L::Target: Logger
+       {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
-
-               let mut included_htlcs = 0;
-               for ref htlc in context.pending_inbound_htlcs.iter() {
-                       if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
-                               continue
-                       }
-                       // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
-                       // transaction including this HTLC if it times out before they RAA.
-                       included_htlcs += 1;
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+               }
+               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
-               for ref htlc in context.pending_outbound_htlcs.iter() {
-                       if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
-                               continue
-                       }
-                       match htlc.state {
-                               OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
-                               OutboundHTLCState::Committed => included_htlcs += 1,
-                               OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
-                               // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
-                               // transaction won't be generated until they send us their next RAA, which will mean
-                               // dropping any HTLCs in this state.
-                               _ => {},
+               let funding_script = self.context.get_funding_redeemscript();
+
+               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+
+               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+               let commitment_txid = {
+                       let trusted_tx = commitment_stats.tx.trust();
+                       let bitcoin_tx = trusted_tx.built_transaction();
+                       let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+
+                       log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
+                               log_bytes!(msg.signature.serialize_compact()[..]),
+                               log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
+                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
+                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
                        }
-               }
+                       bitcoin_tx.txid
+               };
+               let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
 
-               for htlc in context.holding_cell_htlc_updates.iter() {
-                       match htlc {
-                               &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
-                                       if amount_msat / 1000 < real_dust_limit_timeout_sat {
-                                               continue
-                                       }
-                                       included_htlcs += 1
-                               },
-                               _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
-                                        // ack we're guaranteed to never include them in commitment txs anymore.
+               // If our counterparty updated the channel fee in this commitment transaction, check that
+               // they can actually afford the new fee now.
+               let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
+                       update_state == FeeUpdateState::RemoteAnnounced
+               } else { false };
+               if update_fee {
+                       debug_assert!(!self.context.is_outbound());
+                       let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
+                       if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
+                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
                        }
                }
-
-               let num_htlcs = included_htlcs + addl_htlcs;
-               let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
                #[cfg(any(test, fuzzing))]
                {
-                       let mut fee = res;
-                       if fee_spike_buffer_htlc.is_some() {
-                               fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
+                       if self.context.is_outbound() {
+                               let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
+                               *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+                               if let Some(info) = projected_commit_tx_info {
+                                       let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
+                                               + self.context.holding_cell_htlc_updates.len();
+                                       if info.total_pending_htlcs == total_pending_htlcs
+                                               && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+                                               && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+                                               && info.feerate == self.context.feerate_per_kw {
+                                                       assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
+                                               }
+                               }
                        }
-                       let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
-                               + context.holding_cell_htlc_updates.len();
-                       let commitment_tx_info = CommitmentTxInfoCached {
-                               fee,
-                               total_pending_htlcs,
-                               next_holder_htlc_id: match htlc.origin {
-                                       HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
-                                       HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
-                               },
-                               next_counterparty_htlc_id: match htlc.origin {
-                                       HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
-                                       HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
-                               },
-                               feerate: context.feerate_per_kw,
-                       };
-                       *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
                }
-               res
-       }
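// Editorial sketch of the "additional HTLC" accounting in
// next_local_commit_tx_fee_msat above: the candidate HTLC only adds weight if
// it would be non-dust, and the optional fee spike buffer HTLC always adds one.
fn additional_htlcs(candidate_msat: u64, dust_threshold_sat: u64, fee_spike_buffer: bool) -> u64 {
    let mut addl = 0;
    if fee_spike_buffer { addl += 1; }
    if candidate_msat / 1000 >= dust_threshold_sat { addl += 1; }
    addl
}

fn main() {
    assert_eq!(additional_htlcs(2_000_000, 1_500, true), 2);  // non-dust + buffer
    assert_eq!(additional_htlcs(1_000_000, 1_500, true), 1);  // dust, buffer only
    assert_eq!(additional_htlcs(1_000_000, 1_500, false), 0); // dust, no buffer
}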
 
-       /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
-       /// pending HTLCs that are on track to be in their next commitment tx
-       ///
-       /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
-       /// `fee_spike_buffer_htlc` is `Some`.
-       ///
-       /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
-       /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
-       ///
-       /// Dust HTLCs are excluded.
-       fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
-               let context = &self.context;
-               assert!(!context.is_outbound());
+               if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
+                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+               }
 
-               let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.opt_anchors() {
-                       (0, 0)
-               } else {
-                       (context.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000,
-                               context.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000)
-               };
-               let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
-               let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+               // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+               // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+               // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+               // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added; however, for
+               // backwards compatibility, we never use it in production. To provide test coverage, here
+               // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
+               #[allow(unused_assignments, unused_mut)]
+               let mut separate_nondust_htlc_sources = false;
+               #[cfg(all(feature = "std", any(test, fuzzing)))] {
+                       use core::hash::{BuildHasher, Hasher};
+                       // Get a random value using the only std API to do so - the DefaultHasher
+                       let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+                       separate_nondust_htlc_sources = rand_val % 2 == 0;
+               }
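// Sketch of the std-only randomness trick above: RandomState seeds each
// hasher from process-level entropy, so hashing nothing and taking finish()
// yields a cheap random u64 without adding a dependency (test-only usage).
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hasher};

fn main() {
    let rand_val = RandomState::new().build_hasher().finish();
    let separate_nondust_htlc_sources = rand_val % 2 == 0;
    println!("separate_nondust_htlc_sources = {}", separate_nondust_htlc_sources);
}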
 
-               let mut addl_htlcs = 0;
-               if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
-               match htlc.origin {
-                       HTLCInitiator::LocalOffered => {
-                               if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
-                                       addl_htlcs += 1;
+               let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
+               let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
+               for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
+                       if let Some(_) = htlc.transaction_output_index {
+                               let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
+                                       self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
+                                       &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+                               let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
+                               let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+                               let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
+                               log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
+                                       log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
+                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
+                               if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
+                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
                                }
-                       },
-                       HTLCInitiator::RemoteOffered => {
-                               if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
-                                       addl_htlcs += 1;
+                               if !separate_nondust_htlc_sources {
+                                       htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
+                               }
+                       } else {
+                               htlcs_and_sigs.push((htlc, None, source_opt.take()));
+                       }
+                       if separate_nondust_htlc_sources {
+                               if let Some(source) = source_opt.take() {
+                                       nondust_htlc_sources.push(source);
                                }
                        }
+                       debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
                }
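// Minimal self-contained sketch of the ECDSA checks in the loop above: each
// received signature is verified against the computed sighash with
// secp256k1's verify_ecdsa, as the commitment and HTLC signatures are checked
// in the diff. The key and digest below are made-up test values.
use secp256k1::{Message, PublicKey, Secp256k1, SecretKey};

fn main() {
    let secp = Secp256k1::new();
    let sk = SecretKey::from_slice(&[0x42u8; 32]).expect("valid key");
    let pk = PublicKey::from_secret_key(&secp, &sk);
    // Stand-in for a 32-byte commitment/HTLC sighash.
    let sighash = Message::from_slice(&[0x01u8; 32]).expect("32 bytes");
    let sig = secp.sign_ecdsa(&sighash, &sk);
    assert!(secp.verify_ecdsa(&sighash, &sig, &pk).is_ok());
}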
 
-               // When calculating the set of HTLCs which will be included in their next commitment_signed, all
-               // non-dust inbound HTLCs are included (as all states imply they will be included) and only
-               // committed outbound HTLCs, see below.
-               let mut included_htlcs = 0;
-               for ref htlc in context.pending_inbound_htlcs.iter() {
-                       if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
-                               continue
+               let holder_commitment_tx = HolderCommitmentTransaction::new(
+                       commitment_stats.tx,
+                       msg.signature,
+                       msg.htlc_signatures.clone(),
+                       &self.context.get_holder_pubkeys().funding_pubkey,
+                       self.context.counterparty_funding_pubkey()
+               );
+
+               self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
+                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+               // Update state now that we've passed all the can-fail calls...
+               let mut need_commitment = false;
+               if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
+                       if *update_state == FeeUpdateState::RemoteAnnounced {
+                               *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
+                               need_commitment = true;
                        }
-                       included_htlcs += 1;
                }
 
-               for ref htlc in context.pending_outbound_htlcs.iter() {
-                       if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
-                               continue
-                       }
-                       // We only include outbound HTLCs if they will not be included in their next commitment_signed,
-                       // i.e. if they've responded to us with an RAA after announcement.
-                       match htlc.state {
-                               OutboundHTLCState::Committed => included_htlcs += 1,
-                               OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
-                               OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
-                               _ => {},
-                       }
-               }
-
-               let num_htlcs = included_htlcs + addl_htlcs;
-               let res = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, context.opt_anchors());
-               #[cfg(any(test, fuzzing))]
-               {
-                       let mut fee = res;
-                       if fee_spike_buffer_htlc.is_some() {
-                               fee = Self::commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, context.opt_anchors());
+               for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+                       let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
+                               Some(forward_info.clone())
+                       } else { None };
+                       if let Some(forward_info) = new_forward {
+                               log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
+                                       log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+                               htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+                               need_commitment = true;
                        }
-                       let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
-                       let commitment_tx_info = CommitmentTxInfoCached {
-                               fee,
-                               total_pending_htlcs,
-                               next_holder_htlc_id: match htlc.origin {
-                                       HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
-                                       HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
-                               },
-                               next_counterparty_htlc_id: match htlc.origin {
-                                       HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
-                                       HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
-                               },
-                               feerate: context.feerate_per_kw,
-                       };
-                       *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
-               }
-               res
-       }
-
-       pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
-       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
-               // We can't accept HTLCs sent after we've sent a shutdown.
-               let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if local_sent_shutdown {
-                       pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-               }
-               // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
-               let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
-               if remote_sent_shutdown {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
-               }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
-               }
-               if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
-               }
-               if msg.amount_msat == 0 {
-                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
-               }
-               if msg.amount_msat < self.context.holder_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
-               }
-
-               let inbound_stats = self.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = self.get_outbound_pending_htlc_stats(None);
-               if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
-                       return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
                }
-               if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
-               }
-               // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
-               // the reserve_satoshis we told them to always have as direct payment so that they lose
-               // something if we punish them for broadcasting an old state).
-               // Note that we don't really care about having a small/no to_remote output in our local
-               // commitment transactions, as the purpose of the channel reserve is to ensure we can
-               // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
-               // present in the next commitment transaction we send them (at least for fulfilled ones,
-               // failed ones won't modify value_to_self).
-               // Note that we will send HTLCs which another instance of rust-lightning would think
-               // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
-               // Channel state once they will not be present in the next received commitment
-               // transaction).
-               let mut removed_outbound_total_msat = 0;
-               for ref htlc in self.context.pending_outbound_htlcs.iter() {
-                       if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
-                               removed_outbound_total_msat += htlc.amount_msat;
-                       } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
-                               removed_outbound_total_msat += htlc.amount_msat;
+               let mut claimed_htlcs = Vec::new();
+               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+                       if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
+                               log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+                                       log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+                               // Grab the preimage, if it exists, instead of cloning
+                               let mut reason = OutboundHTLCOutcome::Success(None);
+                               mem::swap(outcome, &mut reason);
+                               if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+                                       // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+                                       // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+                                       // have a `Success(None)` reason. In this case we could forget some HTLC
+                                       // claims, but such an upgrade is unlikely, and including claimed HTLCs here
+                                       // fixes a bug the user was already exposed to on 0.0.104 when they initiated
+                                       // the claim.
+                                       claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+                               }
+                               htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
+                               need_commitment = true;
                        }
                }
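+               // (Editor's note, illustrative only.) The outbound removal ladder: an HTLC the
+               // peer fulfilled/failed steps RemoteRemoved -> AwaitingRemoteRevokeToRemove here;
+               // revoke_and_ack handling below later promotes it to AwaitingRemovedRemoteRevoke
+               // and finally drops it once the removal is irrevocably committed.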
 
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
-                       (0, 0)
-               } else {
-                       let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
+               self.context.latest_monitor_update_id += 1;
+               let mut monitor_update = ChannelMonitorUpdate {
+                       update_id: self.context.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+                               commitment_tx: holder_commitment_tx,
+                               htlc_outputs: htlcs_and_sigs,
+                               claimed_htlcs,
+                               nondust_htlc_sources,
+                       }]
                };
-               let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-               if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-                       let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
-                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               }
 
-               let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-               if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-                       let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
-                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               }
+               self.context.cur_holder_commitment_transaction_number -= 1;
+               // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
+               // build_commitment_no_status_check() next which will reset this to RAAFirst.
+               self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
 
-               let pending_value_to_self_msat =
-                       self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
-               let pending_remote_value_msat =
-                       self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
-               if pending_remote_value_msat < msg.amount_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       // In case we initially failed monitor updating without requiring a response, we need
+                       // to make sure the RAA gets sent first.
+                       self.context.monitor_pending_revoke_and_ack = true;
+                       if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+                               // If we were going to send a commitment_signed after the RAA, go ahead and do all
+                               // the corresponding HTLC status updates so that get_last_commitment_update
+                               // includes the right HTLCs.
+                               self.context.monitor_pending_commitment_signed = true;
+                               let mut additional_update = self.build_commitment_no_status_check(logger);
+                               // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+                               // them to be strictly increasing by one, so reset it here.
+                               self.context.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
+                       }
+                       log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+                               log_bytes!(self.context.channel_id));
+                       return Ok(self.push_ret_blockable_mon_update(monitor_update));
                }
 
-               // Check that the remote can afford to pay for this HTLC on-chain at the current
-               // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
-               let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
-                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
-               };
-               if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
-               };
+               let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+                       // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+                       // we'll send one right away when we receive their revoke_and_ack and
+                       // free_holding_cell_htlcs() runs.
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
+                       // to be strictly increasing by one, so reset it here.
+                       self.context.latest_monitor_update_id = monitor_update.update_id;
+                       monitor_update.updates.append(&mut additional_update.updates);
+                       true
+               } else { false };
 
-               if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
-               }
+               log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+                       log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+               self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+               return Ok(self.push_ret_blockable_mon_update(monitor_update));
+       }
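+
+       // Illustrative sketch (editor's note, not part of this change): the blockable monitor
+       // update returned above surfaces to the caller as an `Option`. A caller might handle
+       // it roughly as follows (`persist_and_apply` and `force_close` are hypothetical):
+       //
+       //   match chan.commitment_signed(&msg, &logger) {
+       //       Ok(Some(monitor_update)) => persist_and_apply(monitor_update),
+       //       Ok(None) => { /* blocked; it will be released strictly in update_id order */ },
+       //       Err(ChannelError::Close(reason)) => force_close(reason),
+       //       Err(_) => { /* other ChannelError variants */ },
+       //   }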
 
-               if !self.context.is_outbound() {
-                       // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-                       // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
-                       // receiver's side, only on the sender's.
-                       // Note that when we eventually remove support for fee updates and switch to anchor output
-                       // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
-                       // the extra htlc when calculating the next remote commitment transaction fee as we should
-                       // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
-                       // sensitive to fee spikes.
-                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-                       if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
-                               // Note that if the pending_forward_status is not updated here, then it's because we're already failing
-                               // the HTLC, i.e. its status is already set to failing.
-                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               } else {
-                       // Check that they won't violate our local required channel reserve by adding this HTLC.
-                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
-                       if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
-                               return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
-                       }
-               }
-               if self.context.next_counterparty_htlc_id != msg.htlc_id {
-                       return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
-               }
-               if msg.cltv_expiry >= 500000000 {
-                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
-               }
+       /// Public version of the below, checking relevant preconditions first.
+       /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+       /// returns `(None, Vec::new())`.
+       pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+                  (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+                       self.free_holding_cell_htlcs(fee_estimator, logger)
+               } else { (None, Vec::new()) }
+       }
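+
+       // Illustrative sketch (editor's note, not part of this change): a caller that has just
+       // completed a pending monitor update might drive the holding cell like this, failing
+       // back whatever could not be resent (`fail_htlc_backwards` is hypothetical):
+       //
+       //   let (monitor_update_opt, failed_htlcs) =
+       //       chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
+       //   for (source, payment_hash) in failed_htlcs {
+       //       fail_htlc_backwards(source, payment_hash);
+       //   }
+       //   if let Some(monitor_update) = monitor_update_opt {
+       //       // persist via the chain::Watch implementation before proceeding
+       //   }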
 
-               if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
-                       if let PendingHTLCStatus::Forward(_) = pending_forward_status {
-                               panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
-                       }
-               }
+       /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+       /// for our counterparty.
+       fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+               if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+                       log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
 
-               // Now update local state:
-               self.context.next_counterparty_htlc_id += 1;
-               self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
-                       htlc_id: msg.htlc_id,
-                       amount_msat: msg.amount_msat,
-                       payment_hash: msg.payment_hash,
-                       cltv_expiry: msg.cltv_expiry,
-                       state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
-               });
-               Ok(())
-       }
+                       let mut monitor_update = ChannelMonitorUpdate {
+                               update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+                               updates: Vec::new(),
+                       };
 
-       /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
-       #[inline]
-       fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
-               assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
-               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
-                       if htlc.htlc_id == htlc_id {
-                               let outcome = match check_preimage {
-                                       None => fail_reason.into(),
-                                       Some(payment_preimage) => {
-                                               let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
-                                               if payment_hash != htlc.payment_hash {
-                                                       return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+                       let mut htlc_updates = Vec::new();
+                       mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+                       let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+                       let mut htlcs_to_fail = Vec::new();
+                       for htlc_update in htlc_updates.drain(..) {
+                               // Note that this *can* fail, though only under rather rare conditions, such as
+                               // fee races with adding too many outputs pushing our total payments just over
+                               // the limit. If this proves less rare than anticipated, we may want to revisit
+                               // handling this case better, perhaps fulfilling some of the HTLCs while
+                               // attempting to rebalance channels.
+                               match &htlc_update {
+                                       &HTLCUpdateAwaitingACK::AddHTLC {
+                                               amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+                                               skimmed_fee_msat, ..
+                                       } => {
+                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+                                                       onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+                                               {
+                                                       Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+                                                       Err(e) => {
+                                                               match e {
+                                                                       ChannelError::Ignore(ref msg) => {
+                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
+                                                                                       log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
+                                                                               // If we fail to send here, then this HTLC should
+                                                                               // be failed backwards. Failing to send here
+                                                                               // indicates that this HTLC may keep being put back
+                                                                               // into the holding cell without ever being
+                                                                               // successfully forwarded/failed/fulfilled, causing
+                                                                               // our counterparty to eventually close on us.
+                                                                               htlcs_to_fail.push((source.clone(), *payment_hash));
+                                                                       },
+                                                                       _ => {
+                                                                               panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+                                                                       },
+                                                               }
+                                                       }
+                                               }
+                                       },
+                                       &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+                                               // If an HTLC claim was previously added to the holding cell (via
+                                               // `get_update_fulfill_htlc`), then generating the claim message itself must
+                                               // not fail - any intervening attempts to claim the HTLC will have resulted
+                                               // in it hitting the holding cell again, and we cannot change the state of a
+                                               // holding cell HTLC from fulfill to anything else.
+                                               let (update_fulfill_msg_option, mut additional_monitor_update) =
+                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+                                                               (msg, monitor_update)
+                                                       } else { unreachable!() };
+                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+                                               monitor_update.updates.append(&mut additional_monitor_update.updates);
+                                       },
+                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+                                               match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+                                                       Ok(update_fail_msg_option) => {
+                                                               // If an HTLC failure was previously added to the holding cell (via
+                                                               // `queue_fail_htlc`) then generating the fail message itself must
+                                                               // not fail - we should never end up in a state where we double-fail
+                                                               // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+                                                               // for a full revocation before failing.
+                                                               update_fail_htlcs.push(update_fail_msg_option.unwrap())
+                                                       },
+                                                       Err(e) => {
+                                                               if let ChannelError::Ignore(_) = e {}
+                                                               else {
+                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+                                                               }
+                                                       }
                                                }
-                                               OutboundHTLCOutcome::Success(Some(payment_preimage))
-                                       }
-                               };
-                               match htlc.state {
-                                       OutboundHTLCState::LocalAnnounced(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
-                                       OutboundHTLCState::Committed => {
-                                               htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
                                        },
-                                       OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
                                }
-                               return Ok(htlc);
                        }
-               }
-               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
-       }
-
-       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
-               }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
-               }
-
-               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
-       }
+                       if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
+                               return (None, htlcs_to_fail);
+                       }
+                       let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+                               self.send_update_fee(feerate, false, fee_estimator, logger)
+                       } else {
+                               None
+                       };
 
-       pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
-               }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
-               }
+                       let mut additional_update = self.build_commitment_no_status_check(logger);
+                       // build_commitment_no_status_check and get_update_fulfill_htlc may bump
+                       // latest_monitor_update_id but we want them to be strictly increasing by one, so
+                       // reset it here.
+                       self.context.latest_monitor_update_id = monitor_update.update_id;
+                       monitor_update.updates.append(&mut additional_update.updates);
 
-               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
-               Ok(())
-       }
+                       log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+                               update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
 
-       pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
-               }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+                       self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                       (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+               } else {
+                       (None, Vec::new())
                }
-
-               self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
-               Ok(())
        }
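+
+       // Illustrative note (editor's addition): updates accumulate in the holding cell while
+       // we await the peer's revoke_and_ack; draining above replays each one - adds re-run
+       // send_htlc, claims re-run get_update_fulfill_htlc (which must succeed), and fails
+       // re-run fail_htlc - and then a single commitment_signed covers the whole batch.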
 
-       pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
-               where L::Target: Logger
+       /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+       /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+       /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+       /// generating an appropriate error *after* the channel state has been updated based on the
+       /// revoke_and_ack message.
+       pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger,
        {
                if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
-               let funding_script = self.context.get_funding_redeemscript();
-
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
-               let commitment_txid = {
-                       let trusted_tx = commitment_stats.tx.trust();
-                       let bitcoin_tx = trusted_tx.built_transaction();
-                       let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+               let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
 
-                       log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
-                               log_bytes!(msg.signature.serialize_compact()[..]),
-                               log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
-                               log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
-                       if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
-                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+               if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+                       if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
                        }
-                       bitcoin_tx.txid
-               };
-               let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+               }
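+               // (Editor's note, illustrative only.) BOLT 3 defines
+               // per_commitment_point = per_commitment_secret * G, so the
+               // PublicKey::from_secret_key() comparison above rejects any secret that does not
+               // match the point the peer previously advertised for the state being revoked.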
 
-               // If our counterparty updated the channel fee in this commitment transaction, check that
-               // they can actually afford the new fee now.
-               let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
-                       update_state == FeeUpdateState::RemoteAnnounced
-               } else { false };
-               if update_fee {
-                       debug_assert!(!self.context.is_outbound());
-                       let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
-                       if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
-                       }
+               if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+                       // Our counterparty seems to have burned their coins to us (by revoking a state when we
+                       // haven't given them a new commitment transaction to broadcast). We should probably
+                       // take advantage of this by updating our channel monitor, sending them an error, and
+                       // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
+                       // lot of work, and there's some chance this is all a misunderstanding anyway.
+                       // We have to do *something*, though, since our signer may get mad at us for otherwise
+                       // jumping a remote commitment number, so best to just force-close and move on.
+                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
                }
+
                #[cfg(any(test, fuzzing))]
                {
-                       if self.context.is_outbound() {
-                               let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
-                               *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
-                               if let Some(info) = projected_commit_tx_info {
-                                       let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
-                                               + self.context.holding_cell_htlc_updates.len();
-                                       if info.total_pending_htlcs == total_pending_htlcs
-                                               && info.next_holder_htlc_id == self.context.next_holder_htlc_id
-                                               && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
-                                               && info.feerate == self.context.feerate_per_kw {
-                                                       assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
-                                               }
-                               }
-                       }
+                       *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+                       *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
                }
 
-               if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
-               }
+               self.context.holder_signer.validate_counterparty_revocation(
+                       self.context.cur_counterparty_commitment_transaction_number + 1,
+                       &secret
+               ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
 
-               // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
-               // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
-               // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
-               // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
-               // backwards compatibility, we never use it in production. To provide test coverage, here,
-               // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
-               #[allow(unused_assignments, unused_mut)]
-               let mut separate_nondust_htlc_sources = false;
-               #[cfg(all(feature = "std", any(test, fuzzing)))] {
-                       use core::hash::{BuildHasher, Hasher};
-                       // Get a random value using the only std API to do so - the DefaultHasher
-                       let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
-                       separate_nondust_htlc_sources = rand_val % 2 == 0;
-               }
+               self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+               self.context.latest_monitor_update_id += 1;
+               let mut monitor_update = ChannelMonitorUpdate {
+                       update_id: self.context.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+                               idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+                               secret: msg.per_commitment_secret,
+                       }],
+               };
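+               // (Editor's note, illustrative only.) provide_secret() also checks consistency:
+               // BOLT 3 generates per-commitment secrets from a seed so that the whole history
+               // can be verified and stored compactly as a bounded set of (index, secret)
+               // pairs; an inconsistent secret is rejected above as "Previous secrets did not
+               // match new one".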
 
-               let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
-               let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
-               for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
-                       if let Some(_) = htlc.transaction_output_index {
-                               let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
-                                       self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
-                                       false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+               // Update state now that we've passed all the can-fail calls...
+               // (Note that we may still fail to generate the new commitment_signed message, but that's
+               // OK: we step the channel here and *then*, if the new generation fails, we can fail the
+               // channel based on that; stepping the state here should be safe either way.)
+               self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+               self.context.sent_message_awaiting_response = None;
+               self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+               self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+               self.context.cur_counterparty_commitment_transaction_number -= 1;
 
-                               let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
-                               let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
-                               let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
-                               log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
-                                       log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
-                                       encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
-                               if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
-                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
-                               }
-                               if !separate_nondust_htlc_sources {
-                                       htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
-                               }
-                       } else {
-                               htlcs_and_sigs.push((htlc, None, source_opt.take()));
-                       }
-                       if separate_nondust_htlc_sources {
-                               if let Some(source) = source_opt.take() {
-                                       nondust_htlc_sources.push(source);
-                               }
-                       }
-                       debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
+               if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+                       self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
                }
 
-               let holder_commitment_tx = HolderCommitmentTransaction::new(
-                       commitment_stats.tx,
-                       msg.signature,
-                       msg.htlc_signatures.clone(),
-                       &self.context.get_holder_pubkeys().funding_pubkey,
-                       self.context.counterparty_funding_pubkey()
-               );
+               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+               let mut to_forward_infos = Vec::new();
+               let mut revoked_htlcs = Vec::new();
+               let mut finalized_claimed_htlcs = Vec::new();
+               let mut update_fail_htlcs = Vec::new();
+               let mut update_fail_malformed_htlcs = Vec::new();
+               let mut require_commitment = false;
+               let mut value_to_self_msat_diff: i64 = 0;
 
-               self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+               {
+                       // Take references explicitly so that we can hold multiple references to self.context.
+                       let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+                       let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
 
-               // Update state now that we've passed all the can-fail calls...
-               let mut need_commitment = false;
-               if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
-                       if *update_state == FeeUpdateState::RemoteAnnounced {
-                               *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
-                               need_commitment = true;
-                       }
-               }
+                       // We really shouldn't have two passes here, but retain gives a non-mutable ref (a Rust limitation)
+                       pending_inbound_htlcs.retain(|htlc| {
+                               if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+                                       log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
+                                       if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+                                               value_to_self_msat_diff += htlc.amount_msat as i64;
+                                       }
+                                       false
+                               } else { true }
+                       });
+                       pending_outbound_htlcs.retain(|htlc| {
+                               if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+                                       log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
+                                       if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+                                               revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
+                                       } else {
+                                               finalized_claimed_htlcs.push(htlc.source.clone());
+                                               // They fulfilled, so we sent them money
+                                               value_to_self_msat_diff -= htlc.amount_msat as i64;
+                                       }
+                                       false
+                               } else { true }
+                       });
+                       for htlc in pending_inbound_htlcs.iter_mut() {
+                               let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+                                       true
+                               } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+                                       true
+                               } else { false };
+                               if swap {
+                                       let mut state = InboundHTLCState::Committed;
+                                       mem::swap(&mut state, &mut htlc.state);
 
-               for htlc in self.context.pending_inbound_htlcs.iter_mut() {
-                       let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
-                               Some(forward_info.clone())
-                       } else { None };
-                       if let Some(forward_info) = new_forward {
-                               log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
-                                       log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
-                               htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
-                               need_commitment = true;
+                                       if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+                                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+                                               htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+                                               require_commitment = true;
+                                       } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+                                               match forward_info {
+                                                       PendingHTLCStatus::Fail(fail_msg) => {
+                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
+                                                               require_commitment = true;
+                                                               match fail_msg {
+                                                                       HTLCFailureMsg::Relay(msg) => {
+                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+                                                                               update_fail_htlcs.push(msg)
+                                                                       },
+                                                                       HTLCFailureMsg::Malformed(msg) => {
+                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+                                                                               update_fail_malformed_htlcs.push(msg)
+                                                                       },
+                                                               }
+                                                       },
+                                                       PendingHTLCStatus::Forward(forward_info) => {
+                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
+                                                               to_forward_infos.push((forward_info, htlc.htlc_id));
+                                                               htlc.state = InboundHTLCState::Committed;
+                                                       }
+                                               }
+                                       }
+                               }
                        }
-               }
-               let mut claimed_htlcs = Vec::new();
-               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
-                       if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
-                               log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
-                                       log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
-                               // Grab the preimage, if it exists, instead of cloning
-                               let mut reason = OutboundHTLCOutcome::Success(None);
-                               mem::swap(outcome, &mut reason);
-                               if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
-                                       // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
-                                       // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
-                                       // have a `Success(None)` reason. In this case we could forget some HTLC
-                                       // claims, but such an upgrade is unlikely and including claimed HTLCs here
-                                       // fixes a bug which the user was exposed to on 0.0.104 when they started the
-                                       // claim anyway.
-                                       claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+                       for htlc in pending_outbound_htlcs.iter_mut() {
+                               if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+                                       log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
+                                       htlc.state = OutboundHTLCState::Committed;
+                               }
+                               if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+                                       log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+                                       // Grab the preimage, if it exists, instead of cloning
+                                       let mut reason = OutboundHTLCOutcome::Success(None);
+                                       mem::swap(outcome, &mut reason);
+                                       htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+                                       require_commitment = true;
                                }
-                               htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
-                               need_commitment = true;
                        }
                }
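+               // Apply the net balance change from the HTLCs removed above; the diff is
+               // signed (inbound fulfills credit us, outbound fulfills debit us), hence
+               // the round-trip through i64.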
+               self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
 
-               self.context.latest_monitor_update_id += 1;
-               let mut monitor_update = ChannelMonitorUpdate {
-                       update_id: self.context.latest_monitor_update_id,
-                       updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
-                               commitment_tx: holder_commitment_tx,
-                               htlc_outputs: htlcs_and_sigs,
-                               claimed_htlcs,
-                               nondust_htlc_sources,
-                       }]
-               };
-
-               self.context.cur_holder_commitment_transaction_number -= 1;
-               // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
-               // build_commitment_no_status_check() next which will reset this to RAAFirst.
-               self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
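+               // Fee updates follow the same two-step commitment dance as HTLCs: an
+               // outbound update is committed once our counterparty revokes, while an
+               // inbound update we deferred now requires a fresh commitment_signed.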
+               if let Some((feerate, update_state)) = self.context.pending_update_fee {
+                       match update_state {
+                               FeeUpdateState::Outbound => {
+                                       debug_assert!(self.context.is_outbound());
+                                       log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+                                       self.context.feerate_per_kw = feerate;
+                                       self.context.pending_update_fee = None;
+                               },
+                               FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
+                               FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+                                       debug_assert!(!self.context.is_outbound());
+                                       log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+                                       require_commitment = true;
+                                       self.context.feerate_per_kw = feerate;
+                                       self.context.pending_update_fee = None;
+                               },
+                       }
+               }
 
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
-                       // In case we initially failed monitor updating without requiring a response, we need
-                       // to make sure the RAA gets sent first.
-                       self.context.monitor_pending_revoke_and_ack = true;
-                       if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
-                               // If we were going to send a commitment_signed after the RAA, go ahead and do all
-                               // the corresponding HTLC status updates so that get_last_commitment_update
-                               // includes the right HTLCs.
+               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+                       // We can't actually generate a new commitment transaction (including by freeing holding
+                       // cells) while we can't update the monitor, so we just return what we have.
+                       if require_commitment {
                                self.context.monitor_pending_commitment_signed = true;
+                               // When the monitor updating is restored we'll call get_last_commitment_update(),
+                               // which does not update state, but we're definitely now awaiting a remote revoke
+                               // before we can step forward any more, so set it here.
                                let mut additional_update = self.build_commitment_no_status_check(logger);
                                // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.context.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
                        }
-                       log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
-                               log_bytes!(self.context.channel_id));
-                       return Ok(self.push_ret_blockable_mon_update(monitor_update));
+                       self.context.monitor_pending_forwards.append(&mut to_forward_infos);
+                       self.context.monitor_pending_failures.append(&mut revoked_htlcs);
+                       self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
+                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+                       return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
                }
 
-               let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
-                       // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
-                       // we'll send one right away when we get the revoke_and_ack when we
-                       // free_holding_cell_htlcs().
-                       let mut additional_update = self.build_commitment_no_status_check(logger);
-                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
-                       // strictly increasing by one, so decrement it here.
-                       self.context.latest_monitor_update_id = monitor_update.update_id;
-                       monitor_update.updates.append(&mut additional_update.updates);
-                       true
-               } else { false };
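+               // With their revocation in hand we can also free the holding cell. Any
+               // updates generated there are folded into this monitor update so that
+               // update_ids stay strictly sequential; if the cell was empty we may still
+               // owe a commitment_signed for the state promotions above.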
+               match self.free_holding_cell_htlcs(fee_estimator, logger) {
+                       (Some(mut additional_update), htlcs_to_fail) => {
+                               // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
+                               // strictly increasing by one, so decrement it here.
+                               self.context.latest_monitor_update_id = monitor_update.update_id;
+                               monitor_update.updates.append(&mut additional_update.updates);
 
-               log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-                       log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
-               self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
-               return Ok(self.push_ret_blockable_mon_update(monitor_update));
+                               self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                               Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+                       },
+                       (None, htlcs_to_fail) => {
+                               if require_commitment {
+                                       let mut additional_update = self.build_commitment_no_status_check(logger);
+
+                                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+                                       // strictly increasing by one, so decrement it here.
+                                       self.context.latest_monitor_update_id = monitor_update.update_id;
+                                       monitor_update.updates.append(&mut additional_update.updates);
+
+                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
+                                               log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+                                       self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+                               } else {
+                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
+                                       self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+                               }
+                       }
+               }
        }
 
-       /// Public version of the below, checking relevant preconditions first.
-       /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
-       /// returns `(None, Vec::new())`.
-       pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
-               if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
-                  (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-                       self.free_holding_cell_htlcs(logger)
-               } else { (None, Vec::new()) }
+       /// Queues up an outbound update fee by placing it in the holding cell. You should call
+       /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+       /// commitment update.
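+       ///
+       /// A minimal sketch (hypothetical `channel`, `fee_estimator` and `logger` bindings;
+       /// the feerate is illustrative):
+       /// ```ignore
+       /// // Queue a feerate of 2500 sat per 1000 weight; no update_fee message is
+       /// // generated yet.
+       /// channel.queue_update_fee(2500, &fee_estimator, &logger);
+       /// // The message goes out later, when the holding cell is freed.
+       /// ```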
+       pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
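+               // Since `force_holding_cell` is set, `send_update_fee` never returns a
+               // message: the update is either queued in the holding cell or dropped as
+               // unaffordable.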
+               assert!(msg_opt.is_none(), "We forced holding cell?");
        }
 
-       /// Frees any pending commitment updates in the holding cell, generating the relevant messages
-       /// for our counterparty.
-       fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
-               if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
-                       log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
-                               if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
-
-                       let mut monitor_update = ChannelMonitorUpdate {
-                               update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
-                               updates: Vec::new(),
-                       };
-
-                       let mut htlc_updates = Vec::new();
-                       mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
-                       let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
-                       let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
-                       let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
-                       let mut htlcs_to_fail = Vec::new();
-                       for htlc_update in htlc_updates.drain(..) {
-                               // Note that this *can* fail, though it should be due to rather-rare conditions on
-                               // fee races with adding too many outputs which push our total payments just over
-                               // the limit. In case it's less rare than I anticipate, we may want to revisit
-                               // handling this case better and maybe fulfilling some of the HTLCs while attempting
-                               // to rebalance channels.
-                               match &htlc_update {
-                                       &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
-                                               match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
-                                                       Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
-                                                       Err(e) => {
-                                                               match e {
-                                                                       ChannelError::Ignore(ref msg) => {
-                                                                               log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
-                                                                                       log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
-                                                                               // If we fail to send here, then this HTLC should
-                                                                               // be failed backwards. Failing to send here
-                                                                               // indicates that this HTLC may keep being put back
-                                                                               // into the holding cell without ever being
-                                                                               // successfully forwarded/failed/fulfilled, causing
-                                                                               // our counterparty to eventually close on us.
-                                                                               htlcs_to_fail.push((source.clone(), *payment_hash));
-                                                                       },
-                                                                       _ => {
-                                                                               panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
-                                                                       },
-                                                               }
-                                                       }
-                                               }
-                                       },
-                                       &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-                                               // If an HTLC claim was previously added to the holding cell (via
-                                               // `get_update_fulfill_htlc`), then generating the claim message itself must
-                                               // not fail - any in between attempts to claim the HTLC will have resulted
-                                               // in it hitting the holding cell again and we cannot change the state of a
-                                               // holding cell HTLC from fulfill to anything else.
-                                               let (update_fulfill_msg_option, mut additional_monitor_update) =
-                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-                                                               (msg, monitor_update)
-                                                       } else { unreachable!() };
-                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-                                               monitor_update.updates.append(&mut additional_monitor_update.updates);
-                                       },
-                                       &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
-                                               match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
-                                                       Ok(update_fail_msg_option) => {
-                                                               // If an HTLC failure was previously added to the holding cell (via
-                                                               // `queue_fail_htlc`) then generating the fail message itself must
-                                                               // not fail - we should never end up in a state where we double-fail
-                                                               // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
-                                                               // for a full revocation before failing.
-                                                               update_fail_htlcs.push(update_fail_msg_option.unwrap())
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
-                                       },
-                               }
-                       }
-                       if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
-                               return (None, htlcs_to_fail);
-                       }
-                       let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
-                               self.send_update_fee(feerate, false, logger)
-                       } else {
-                               None
-                       };
-
-                       let mut additional_update = self.build_commitment_no_status_check(logger);
-                       // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
-                       // but we want them to be strictly increasing by one, so reset it here.
-                       self.context.latest_monitor_update_id = monitor_update.update_id;
-                       monitor_update.updates.append(&mut additional_update.updates);
-
-                       log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
-                               log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
-                               update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
-
-                       self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
-                       (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
-               } else {
-                       (None, Vec::new())
-               }
-       }
-
-       /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
-       /// commitment_signed message here in case we had pending outbound HTLCs to add which were
-       /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
-       /// generating an appropriate error *after* the channel state has been updated based on the
-       /// revoke_and_ack message.
-       pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
-               where L::Target: Logger,
+       /// Adds a pending fee update to this channel. See the docs for `send_htlc` for
+       /// further details on when the returned message is `None`.
+       /// If our balance is too low to cover the cost of the next commitment transaction at the
+       /// new feerate, the update is cancelled.
+       ///
+       /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
+       /// [`Channel`] if `force_holding_cell` is false.
+       fn send_update_fee<F: Deref, L: Deref>(
+               &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Option<msgs::UpdateFee>
+       where F::Target: FeeEstimator, L::Target: Logger
        {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+               if !self.context.is_outbound() {
+                       panic!("Cannot send fee from inbound channel");
                }
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+               if !self.context.is_usable() {
+                       panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+               if !self.context.is_live() {
+                       panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
                }
 
-               let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
+               // Before proposing a feerate update, check that we can actually afford the new fee.
+               let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
+               let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
+               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
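+               // Budget for the worst case at the proposed feerate: every nondust HTLC on
+               // the current commitment transaction, everything queued in the holding cell,
+               // plus a buffer of CONCURRENT_INBOUND_HTLC_FEE_BUFFER further inbound HTLCs.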
+               let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
+               let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+               if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+                       //TODO: auto-close after a number of failures?
+                       log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+                       return None;
+               }
 
-               if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
-                       if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
-                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
-                       }
+               // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
+               let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+               let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+                       log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+                       return None;
+               }
+               if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+                       log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+                       return None;
                }
 
-               if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
-                       // Our counterparty seems to have burned their coins to us (by revoking a state when we
-                       // haven't given them a new commitment transaction to broadcast). We should probably
-                       // take advantage of this by updating our channel monitor, sending them an error, and
-                       // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
-                       // lot of work, and there's some chance this is all a misunderstanding anyway.
-                       // We have to do *something*, though, since our signer may get mad at us for otherwise
-                       // jumping a remote commitment number, so best to just force-close and move on.
-                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+                       force_holding_cell = true;
                }
 
-               #[cfg(any(test, fuzzing))]
-               {
-                       *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
-                       *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+               if force_holding_cell {
+                       self.context.holding_cell_update_fee = Some(feerate_per_kw);
+                       return None;
                }
 
-               self.context.holder_signer.validate_counterparty_revocation(
-                       self.context.cur_counterparty_commitment_transaction_number + 1,
-                       &secret
-               ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+               debug_assert!(self.context.pending_update_fee.is_none());
+               self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
 
-               self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
-                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
-               self.context.latest_monitor_update_id += 1;
-               let mut monitor_update = ChannelMonitorUpdate {
-                       update_id: self.context.latest_monitor_update_id,
-                       updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
-                               idx: self.context.cur_counterparty_commitment_transaction_number + 1,
-                               secret: msg.per_commitment_secret,
-                       }],
-               };
+               Some(msgs::UpdateFee {
+                       channel_id: self.context.channel_id,
+                       feerate_per_kw,
+               })
+       }
 
-               // Update state now that we've passed all the can-fail calls...
-               // (note that we may still fail to generate the new commitment_signed message, but that's
-               // OK, we step the channel here and *then* if the new generation fails we can fail the
-               // channel based on that, but stepping stuff here should be safe either way.)
-               self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
-               self.context.sent_message_awaiting_response = None;
-               self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
-               self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
-               self.context.cur_counterparty_commitment_transaction_number -= 1;
+       /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+       /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+       /// resent.
+       /// No further message handling calls may be made until a channel_reestablish dance has
+       /// completed.
+       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
+               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
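+               // Before funding is exchanged there is nothing worth reestablishing on
+               // reconnect, so the channel is simply abandoned.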
+               if self.context.channel_state < ChannelState::FundingSent as u32 {
+                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
+                       return;
+               }
 
-               if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
-                       self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+                       // While the below code should be idempotent, it's simpler to just return early, as
+                       // redundant disconnect events can fire, though they should be rare.
+                       return;
                }
 
-               log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
-               let mut to_forward_infos = Vec::new();
-               let mut revoked_htlcs = Vec::new();
-               let mut finalized_claimed_htlcs = Vec::new();
-               let mut update_fail_htlcs = Vec::new();
-               let mut update_fail_malformed_htlcs = Vec::new();
-               let mut require_commitment = false;
-               let mut value_to_self_msat_diff: i64 = 0;
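+               // Our counterparty may not have received our latest announcement_signatures,
+               // so reset to NotSent; they'll be re-exchanged once the peer reconnects.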
+               if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+                       self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+               }
 
-               {
-                       // Take references explicitly so that we can hold multiple references to self.context.
-                       let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
-                       let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+               // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+               // will be retransmitted.
+               self.context.last_sent_closing_fee = None;
+               self.context.pending_counterparty_closing_signed = None;
+               self.context.closing_fee_limits = None;
 
-                       // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
-                       pending_inbound_htlcs.retain(|htlc| {
-                               if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
-                                       log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
-                                       if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
-                                               value_to_self_msat_diff += htlc.amount_msat as i64;
-                                       }
-                                       false
-                               } else { true }
-                       });
-                       pending_outbound_htlcs.retain(|htlc| {
-                               if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
-                                       log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
-                                       if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
-                                               revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
-                                       } else {
-                                               finalized_claimed_htlcs.push(htlc.source.clone());
-                                               // They fulfilled, so we sent them money
-                                               value_to_self_msat_diff -= htlc.amount_msat as i64;
-                                       }
-                                       false
-                               } else { true }
-                       });
-                       for htlc in pending_inbound_htlcs.iter_mut() {
-                               let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
-                                       true
-                               } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
-                                       true
-                               } else { false };
-                               if swap {
-                                       let mut state = InboundHTLCState::Committed;
-                                       mem::swap(&mut state, &mut htlc.state);
-
-                                       if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
-                                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
-                                               htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
-                                               require_commitment = true;
-                                       } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
-                                               match forward_info {
-                                                       PendingHTLCStatus::Fail(fail_msg) => {
-                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
-                                                               require_commitment = true;
-                                                               match fail_msg {
-                                                                       HTLCFailureMsg::Relay(msg) => {
-                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
-                                                                               update_fail_htlcs.push(msg)
-                                                                       },
-                                                                       HTLCFailureMsg::Malformed(msg) => {
-                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
-                                                                               update_fail_malformed_htlcs.push(msg)
-                                                                       },
-                                                               }
-                                                       },
-                                                       PendingHTLCStatus::Forward(forward_info) => {
-                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
-                                                               to_forward_infos.push((forward_info, htlc.htlc_id));
-                                                               htlc.state = InboundHTLCState::Committed;
-                                                       }
-                                               }
-                                       }
-                               }
-                       }
-                       for htlc in pending_outbound_htlcs.iter_mut() {
-                               if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
-                                       log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
-                                       htlc.state = OutboundHTLCState::Committed;
-                               }
-                               if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
-                                       log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
-                                       // Grab the preimage, if it exists, instead of cloning
-                                       let mut reason = OutboundHTLCOutcome::Success(None);
-                                       mem::swap(outcome, &mut reason);
-                                       htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
-                                       require_commitment = true;
-                               }
-                       }
-               }
-               self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
-
-               if let Some((feerate, update_state)) = self.context.pending_update_fee {
-                       match update_state {
-                               FeeUpdateState::Outbound => {
-                                       debug_assert!(self.context.is_outbound());
-                                       log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
-                                       self.context.feerate_per_kw = feerate;
-                                       self.context.pending_update_fee = None;
-                               },
-                               FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
-                               FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
-                                       debug_assert!(!self.context.is_outbound());
-                                       log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
-                                       require_commitment = true;
-                                       self.context.feerate_per_kw = feerate;
-                                       self.context.pending_update_fee = None;
-                               },
-                       }
-               }
-
-               if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
-                       // We can't actually generate a new commitment transaction (incl by freeing holding
-                       // cells) while we can't update the monitor, so we just return what we have.
-                       if require_commitment {
-                               self.context.monitor_pending_commitment_signed = true;
-                               // When the monitor updating is restored we'll call get_last_commitment_update(),
-                               // which does not update state, but we're definitely now awaiting a remote revoke
-                               // before we can step forward any more, so set it here.
-                               let mut additional_update = self.build_commitment_no_status_check(logger);
-                               // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
-                               // strictly increasing by one, so decrement it here.
-                               self.context.latest_monitor_update_id = monitor_update.update_id;
-                               monitor_update.updates.append(&mut additional_update.updates);
-                       }
-                       self.context.monitor_pending_forwards.append(&mut to_forward_infos);
-                       self.context.monitor_pending_failures.append(&mut revoked_htlcs);
-                       self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
-                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
-                       return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
-               }
-
-               match self.free_holding_cell_htlcs(logger) {
-                       (Some(_), htlcs_to_fail) => {
-                               let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
-                               // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
-                               // strictly increasing by one, so decrement it here.
-                               self.context.latest_monitor_update_id = monitor_update.update_id;
-                               monitor_update.updates.append(&mut additional_update.updates);
-
-                               self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                               Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
-                       },
-                       (None, htlcs_to_fail) => {
-                               if require_commitment {
-                                       let mut additional_update = self.build_commitment_no_status_check(logger);
-
-                                       // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
-                                       // strictly increasing by one, so decrement it here.
-                                       self.context.latest_monitor_update_id = monitor_update.update_id;
-                                       monitor_update.updates.append(&mut additional_update.updates);
-
-                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
-                                               log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
-                                       self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
-                               } else {
-                                       log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
-                                       self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
-                                       Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
-                               }
-                       }
-               }
-       }
-
-       /// Queues up an outbound update fee by placing it in the holding cell. You should call
-       /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
-       /// commitment update.
-       pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
-               let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
-               assert!(msg_opt.is_none(), "We forced holding cell?");
-       }
-
-       /// Adds a pending update to this channel. See the doc for send_htlc for
-       /// further details on the optionness of the return value.
-       /// If our balance is too low to cover the cost of the next commitment transaction at the
-       /// new feerate, the update is cancelled.
-       ///
-       /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
-       /// [`Channel`] if `force_holding_cell` is false.
-       fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
-               if !self.context.is_outbound() {
-                       panic!("Cannot send fee from inbound channel");
-               }
-               if !self.context.is_usable() {
-                       panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
-               }
-               if !self.context.is_live() {
-                       panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
-               }
-
-               // Before proposing a feerate update, check that we can actually afford the new fee.
-               let inbound_stats = self.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
-               let outbound_stats = self.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
-               let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
-               let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
-               if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
-                       //TODO: auto-close after a number of failures?
-                       log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
-                       return None;
-               }
-
-               // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
-               let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
-                       log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
-                       return None;
-               }
-               if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
-                       log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
-                       return None;
-               }
-
-               if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
-                       force_holding_cell = true;
-               }
-
-               if force_holding_cell {
-                       self.context.holding_cell_update_fee = Some(feerate_per_kw);
-                       return None;
-               }
-
-               debug_assert!(self.context.pending_update_fee.is_none());
-               self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
-
-               Some(msgs::UpdateFee {
-                       channel_id: self.context.channel_id,
-                       feerate_per_kw,
-               })
-       }
-
-       /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
-       /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
-       /// resent.
-       /// No further message handling calls may be made until a channel_reestablish dance has
-       /// completed.
-       pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L)  where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-                       return;
-               }
-
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
-                       // While the below code should be idempotent, it's simpler to just return early, as
-                       // redundant disconnect events can fire, though they should be rare.
-                       return;
-               }
-
-               if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
-                       self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
-               }
-
-               // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
-               // will be retransmitted.
-               self.context.last_sent_closing_fee = None;
-               self.context.pending_counterparty_closing_signed = None;
-               self.context.closing_fee_limits = None;
-
-               let mut inbound_drop_count = 0;
-               self.context.pending_inbound_htlcs.retain(|htlc| {
-                       match htlc.state {
-                               InboundHTLCState::RemoteAnnounced(_) => {
-                                       // They sent us an update_add_htlc but we never got the commitment_signed.
-                                       // We'll tell them what commitment_signed we're expecting next and they'll drop
-                                       // this HTLC accordingly
-                                       inbound_drop_count += 1;
+               let mut inbound_drop_count = 0;
+               self.context.pending_inbound_htlcs.retain(|htlc| {
+                       match htlc.state {
+                               InboundHTLCState::RemoteAnnounced(_) => {
+                                       // They sent us an update_add_htlc but we never got the commitment_signed.
+                                       // We'll tell them what commitment_signed we're expecting next and they'll drop
+                                       // this HTLC accordingly
+                                       inbound_drop_count += 1;
                                        false
                                },
                                InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
@@ -4341,12 +3609,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        {
                assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
                self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
-               let mut found_blocked = false;
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
-                       if upd.blocked { found_blocked = true; }
-                       upd.blocked
-               });
 
                // If we're past (or at) the FundingSent stage on an outbound channel, try to
                // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -4435,15 +3697,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
                // won't be pushed over our dust exposure limit by the feerate increase.
                if feerate_over_dust_buffer {
-                       let inbound_stats = self.get_inbound_pending_htlc_stats(None);
-                       let outbound_stats = self.get_outbound_pending_htlc_stats(None);
+                       let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+                       let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
                        let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
                        let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-                       if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+                       if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
                                        msg.feerate_per_kw, holder_tx_dust_exposure)));
                        }
-                       if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+                       if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
                                return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
                                        msg.feerate_per_kw, counterparty_tx_dust_exposure)));
                        }
@@ -4478,6 +3741,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        payment_hash: htlc.payment_hash,
                                        cltv_expiry: htlc.cltv_expiry,
                                        onion_routing_packet: (**onion_packet).clone(),
+                                       skimmed_fee_msat: htlc.skimmed_fee_msat,
                                });
                        }
                }
@@ -4764,12 +4028,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        /// this point if we're the funder we should send the initial closing_signed, and in any case
        /// shutdown should complete within a reasonable timeframe.
        fn closing_negotiation_ready(&self) -> bool {
-               self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
-                       self.context.channel_state &
-                               (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
-                                ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
-                               == BOTH_SIDES_SHUTDOWN_MASK &&
-                       self.context.pending_update_fee.is_none()
+               self.context.closing_negotiation_ready()
        }
 
        /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
@@ -4849,7 +4108,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn shutdown<SP: Deref>(
                &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-       ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+       ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        where SP::Target: SignerProvider
        {
                if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4915,9 +4174,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                }],
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
+                       self.push_ret_blockable_mon_update(monitor_update)
                } else { None };
                let shutdown = if send_shutdown {
                        Some(msgs::Shutdown {
@@ -5207,64 +4464,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
        }
 
-       pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
-               if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
-               self.context.pending_monitor_updates[0].update.update_id - 1
+       /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
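+       ///
+       /// Illustrative example (assumed ids, not from this change): if updates with ids 7 and 8
+       /// are still blocked, every id through 6 has been released, i.e. the result is
+       /// `blocked_monitor_updates[0].update.update_id - 1`.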
+       pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+               if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+               self.context.blocked_monitor_updates[0].update.update_id - 1
        }
 
        /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
        /// further blocked monitor update exists after the next.
-       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
-               for i in 0..self.context.pending_monitor_updates.len() {
-                       if self.context.pending_monitor_updates[i].blocked {
-                               self.context.pending_monitor_updates[i].blocked = false;
-                               return Some((&self.context.pending_monitor_updates[i].update,
-                                       self.context.pending_monitor_updates.len() > i + 1));
-                       }
-               }
-               None
-       }
-
-       /// Pushes a new monitor update into our monitor update queue, returning whether it should be
-       /// immediately given to the user for persisting or if it should be held as blocked.
-       fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
-               let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
-               self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-                       update, blocked: !release_monitor
-               });
-               release_monitor
+       pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+               if self.context.blocked_monitor_updates.is_empty() { return None; }
+               Some((self.context.blocked_monitor_updates.remove(0).update,
+                       !self.context.blocked_monitor_updates.is_empty()))
        }
 
-       /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
-       /// it should be immediately given to the user for persisting or `None` if it should be held as
-       /// blocked.
+       /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+       /// immediately given to the user for persisting or `None` if it should be held as blocked.
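+       ///
+       /// In other words, an update is released immediately only while `blocked_monitor_updates`
+       /// is empty; once any update is blocked, later updates queue behind it so they reach the
+       /// `ChannelMonitor` strictly in `update_id` order.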
        fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
-       -> Option<&ChannelMonitorUpdate> {
-               let release_monitor = self.push_blockable_mon_update(update);
-               if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
-       }
-
-       pub fn no_monitor_updates_pending(&self) -> bool {
-               self.context.pending_monitor_updates.is_empty()
-       }
-
-       pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| {
-                       if upd.update.update_id <= update_id {
-                               assert!(!upd.blocked, "Completed update must have flown");
-                               false
-                       } else { true }
-               });
-       }
-
-       pub fn complete_one_mon_update(&mut self, update_id: u64) {
-               self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+       -> Option<ChannelMonitorUpdate> {
+               let release_monitor = self.context.blocked_monitor_updates.is_empty();
+               if !release_monitor {
+                       self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+                               update,
+                       });
+                       None
+               } else {
+                       Some(update)
+               }
        }
 
-       /// Returns an iterator over all unblocked monitor updates which have not yet completed.
-       pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
-               self.context.pending_monitor_updates.iter()
-                       .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
+       pub fn blocked_monitor_updates_pending(&self) -> usize {
+               self.context.blocked_monitor_updates.len()
        }
 
        /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
@@ -5593,8 +4823,962 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
        }
 
-       // Methods to get unprompted messages to send to the remote end (or where we already returned
-       // something in the handler for the message that prompted this message):
+       // Methods to get unprompted messages to send to the remote end (or where we already returned
+       // something in the handler for the message that prompted this message):
+
+       /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+       /// announceable and available for use (have exchanged ChannelReady messages in both
+       /// directions). Should be used for both broadcasted announcements and in response to an
+       /// AnnouncementSignatures message from the remote peer.
+       ///
+       /// Will only fail if we're not in a state where channel_announcement may be sent (including
+       /// closing).
+       ///
+       /// This will only return ChannelError::Ignore upon failure.
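+       ///
+       /// Note that per BOLT 7 `node_id_1` must be the lexicographically-lesser of the two node
+       /// ids, so below we order our own and the counterparty's ids by comparing their
+       /// serializations.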
+       fn get_channel_announcement<NS: Deref>(
+               &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
+       ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+               if !self.context.config.announced_channel {
+                       return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+               }
+               if !self.context.is_usable() {
+                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+               }
+
+               let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+                       .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
+               let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
+               let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
+
+               let msg = msgs::UnsignedChannelAnnouncement {
+                       features: channelmanager::provided_channel_features(&user_config),
+                       chain_hash,
+                       short_channel_id: self.context.get_short_channel_id().unwrap(),
+                       node_id_1: if were_node_one { node_id } else { counterparty_node_id },
+                       node_id_2: if were_node_one { counterparty_node_id } else { node_id },
+                       bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
+                       bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
+                       excess_data: Vec::new(),
+               };
+
+               Ok(msg)
+       }
+
+       fn get_announcement_sigs<NS: Deref, L: Deref>(
+               &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
+               best_block_height: u32, logger: &L
+       ) -> Option<msgs::AnnouncementSignatures>
+       where
+               NS::Target: NodeSigner,
+               L::Target: Logger
+       {
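+               // Per BOLT 7 we require six confirmations before announcing: with the funding
+               // transaction confirmed at height H, H + 5 <= best_block_height means blocks H
+               // through H + 5 (six blocks) containing or burying the funding have been seen.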
+               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+                       return None;
+               }
+
+               if !self.context.is_usable() {
+                       return None;
+               }
+
+               if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+                       log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
+                       return None;
+               }
+
+               if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
+                       return None;
+               }
+
+               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
+               let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
+                       Ok(a) => a,
+                       Err(e) => {
+                               log_trace!(logger, "{:?}", e);
+                               return None;
+                       }
+               };
+               let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
+                       Err(_) => {
+                               log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
+                               return None;
+                       },
+                       Ok(v) => v
+               };
+               let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
+                       Err(_) => {
+                               log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
+                               return None;
+                       },
+                       Ok(v) => v
+               };
+               self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
+
+               Some(msgs::AnnouncementSignatures {
+                       channel_id: self.context.channel_id(),
+                       short_channel_id: self.context.get_short_channel_id().unwrap(),
+                       node_signature: our_node_sig,
+                       bitcoin_signature: our_bitcoin_sig,
+               })
+       }
+
+       /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
+       /// available.
+       fn sign_channel_announcement<NS: Deref>(
+               &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
+       ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+               if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
+                       let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+                               .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
+                       let were_node_one = announcement.node_id_1 == our_node_key;
+
+                       let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
+                               .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
+                       let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
+                               .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
+                       Ok(msgs::ChannelAnnouncement {
+                               node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
+                               node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
+                               bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
+                               bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
+                               contents: announcement,
+                       })
+               } else {
+                       Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+               }
+       }
+
+       /// Processes an incoming announcement_signatures message, providing a fully-signed
+       /// channel_announcement message which we can broadcast and storing our counterparty's
+       /// signatures for later reconstruction/rebroadcast of the channel_announcement.
+       pub fn announcement_signatures<NS: Deref>(
+               &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
+               msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
+       ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+               let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+
+               let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+
+               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
+                       return Err(ChannelError::Close(format!(
+                               "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
+                                &announcement, self.context.get_counterparty_node_id())));
+               }
+               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
+                       return Err(ChannelError::Close(format!(
+                               "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
+                               &announcement, self.context.counterparty_funding_pubkey())));
+               }
+
+               self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
+               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+                       return Err(ChannelError::Ignore(
+                               "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
+               }
+
+               self.sign_channel_announcement(node_signer, announcement)
+       }
+
+       /// Gets a signed channel_announcement for this channel, if we previously received an
+       /// announcement_signatures from our counterparty.
+       pub fn get_signed_channel_announcement<NS: Deref>(
+               &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
+       ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
+               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+                       return None;
+               }
+               let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
+                       Ok(res) => res,
+                       Err(_) => return None,
+               };
+               match self.sign_channel_announcement(node_signer, announcement) {
+                       Ok(res) => Some(res),
+                       Err(_) => None,
+               }
+       }
+
+       /// May panic if called on a channel that wasn't immediately-previously
+       /// self.remove_uncommitted_htlcs_and_mark_paused()'d
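+       ///
+       /// Worked example of the index conversion done below (assumed values): if
+       /// `cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 5`, five holder
+       /// commitments have been exchanged, so we send `next_local_commitment_number = 5`.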
+       pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
+               assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+               assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+               // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
+               // current to_remote balances. However, it no longer has any use, and thus is now simply
+               // set to a dummy (but valid, as required by the spec) public key.
+               // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
+               // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is
+               // both valid on-curve and valid under fuzzing mode's arbitrary validity criteria:
+               let mut pk = [2; 33]; pk[1] = 0xff;
+               let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
+               let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+                       let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
+                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
+                       remote_last_secret
+               } else {
+                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
+                       [0;32]
+               };
+               self.mark_awaiting_response();
+               msgs::ChannelReestablish {
+                       channel_id: self.context.channel_id(),
+                       // The protocol has two different commitment number concepts - the "commitment
+                       // transaction number", which starts from 0 and counts up, and the "revocation key
+                       // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
+                       // commitment transaction numbers by the index which will be used to reveal the
+                       // revocation key for that commitment transaction, which means we have to convert them
+                       // to protocol-level commitment numbers here...
+
+                       // next_local_commitment_number is the next commitment_signed number we expect to
+                       // receive (indicating if they need to resend one that we missed).
+                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+                       // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
+                       // receive, however we track it by the next commitment number for a remote transaction
+                       // (which is one further, as they always revoke previous commitment transaction, not
+                       // the one we send) so we have to decrement by 1. Note that if
+                       // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
+                       // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+                       // overflow here.
+                       next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
+                       your_last_per_commitment_secret: remote_last_secret,
+                       my_current_per_commitment_point: dummy_pubkey,
+                       // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+                       // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+                       // txid of that interactive transaction, else we MUST NOT set it.
+                       next_funding_txid: None,
+               }
+       }
+
+
+       // Send stuff to our remote peers:
+
+       /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
+       /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+       /// commitment update.
+       ///
+       /// `Err`s will only be [`ChannelError::Ignore`].
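+       ///
+       /// A minimal usage sketch (the `chan`, `fee_estimator` and `logger` bindings and the HTLC
+       /// fields are hypothetical, not part of this change):
+       ///
+       /// ```ignore
+       /// if let Err(ChannelError::Ignore(msg)) = chan.queue_add_htlc(amt_msat, payment_hash,
+       ///     cltv_expiry, source, onion_packet, None /* skimmed_fee_msat */, &fee_estimator, &logger)
+       /// {
+       ///     // The HTLC was not queued; fail it backwards to the previous hop.
+       /// }
+       /// ```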
+       pub fn queue_add_htlc<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+               onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<(), ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               self
+                       .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
+                               skimmed_fee_msat, fee_estimator, logger)
+                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+                       .map_err(|err| {
+                               if let ChannelError::Ignore(_) = err { /* fine */ }
+                               else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
+                               err
+                       })
+       }
+
+       /// Adds a pending outbound HTLC to this channel. Note that you probably want
+       /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
+       ///
+       /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+       /// the wire:
+       /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+       ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+       ///   awaiting ACK.
+       /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
+       ///   we may not yet have sent the previous commitment update messages and will need to
+       ///   regenerate them.
+       ///
+       /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
+       /// on this [`Channel`] if `force_holding_cell` is false.
+       ///
+       /// `Err`s will only be [`ChannelError::Ignore`].
+       fn send_htlc<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+               onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
+               skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+                       return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
+               }
+               let channel_total_msat = self.context.channel_value_satoshis * 1000;
+               if amount_msat > channel_total_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
+               }
+
+               if amount_msat == 0 {
+                       return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
+               }
+
+               let available_balances = self.context.get_available_balances(fee_estimator);
+               if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+                               available_balances.next_outbound_htlc_minimum_msat)));
+               }
+
+               if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+                               available_balances.next_outbound_htlc_limit_msat)));
+               }
+
+               if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+                       // Note that this should never really happen: being !is_live() on receipt of an
+                       // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
+                       // the user to send directly into a !is_live() channel. However, if we disconnected
+                       // while the previous hop was doing the commitment dance we may end up getting here
+                       // after the forwarding delay. In any case, returning an IgnoreError will get
+                       // ChannelManager to do the right thing and fail backwards now.
+                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
+               }
+
+               let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+               log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+                       if force_holding_cell { "into holding cell" }
+                       else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+                       else { "to peer" });
+
+               if need_holding_cell {
+                       force_holding_cell = true;
+               }
+
+               // Now update local state:
+               if force_holding_cell {
+                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
+                               amount_msat,
+                               payment_hash,
+                               cltv_expiry,
+                               source,
+                               onion_routing_packet,
+                               skimmed_fee_msat,
+                       });
+                       return Ok(None);
+               }
+
+               self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
+                       htlc_id: self.context.next_holder_htlc_id,
+                       amount_msat,
+                       payment_hash: payment_hash.clone(),
+                       cltv_expiry,
+                       state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
+                       source,
+                       skimmed_fee_msat,
+               });
+
+               let res = msgs::UpdateAddHTLC {
+                       channel_id: self.context.channel_id,
+                       htlc_id: self.context.next_holder_htlc_id,
+                       amount_msat,
+                       payment_hash,
+                       cltv_expiry,
+                       onion_routing_packet,
+                       skimmed_fee_msat,
+               };
+               self.context.next_holder_htlc_id += 1;
+
+               Ok(Some(res))
+       }
+
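+       /// Advances holder/counterparty HTLC and fee state for a newly-sent `commitment_signed` and
+       /// builds the [`ChannelMonitorUpdate`] carrying the new counterparty commitment transaction
+       /// info. Note that this mutates channel state (e.g. it sets `AwaitingRemoteRevoke`) but, as
+       /// the name suggests, performs no readiness checks; callers must have done those already.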
+       fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
+               log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
+               // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
+               // fail to generate this commitment, we are still at a point where upgrading their
+               // status is acceptable.
+               for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+                       let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
+                               Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
+                       } else { None };
+                       if let Some(state) = new_state {
+                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+                               htlc.state = state;
+                       }
+               }
+               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+                       if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+                               log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+                               // Grab the preimage, if it exists, instead of cloning
+                               let mut reason = OutboundHTLCOutcome::Success(None);
+                               mem::swap(outcome, &mut reason);
+                               htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+                       }
+               }
+               if let Some((feerate, update_state)) = self.context.pending_update_fee {
+                       if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
+                               debug_assert!(!self.context.is_outbound());
+                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+                               self.context.feerate_per_kw = feerate;
+                               self.context.pending_update_fee = None;
+                       }
+               }
+               self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
+
+               let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+               let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+                       htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+
+               if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
+                       self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
+               }
+
+               self.context.latest_monitor_update_id += 1;
+               let monitor_update = ChannelMonitorUpdate {
+                       update_id: self.context.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
+                               commitment_txid: counterparty_commitment_txid,
+                               htlc_outputs: htlcs.clone(),
+                               commitment_number: self.context.cur_counterparty_commitment_transaction_number,
+                               their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
+                       }]
+               };
+               self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+               monitor_update
+       }
+
+       fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+               let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+
+               #[cfg(any(test, fuzzing))]
+               {
+                       if !self.context.is_outbound() {
+                               let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
+                               *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+                               if let Some(info) = projected_commit_tx_info {
+                                       let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
+                                       if info.total_pending_htlcs == total_pending_htlcs
+                                               && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+                                               && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+                                               && info.feerate == self.context.feerate_per_kw {
+                                                       let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
+                                                       assert_eq!(actual_fee, info.fee);
+                                               }
+                               }
+                       }
+               }
+
+               (counterparty_commitment_txid, commitment_stats.htlcs_included)
+       }
+
+       /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+       /// generation when we shouldn't change HTLC/channel state.
+       fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+               // In test/fuzzing configurations, run the fee-consistency assertions in
+               // `build_commitment_no_state_update` (the call below is compiled out otherwise)
+               #[cfg(any(test, fuzzing))]
+               self.build_commitment_no_state_update(logger);
+
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+               let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+               let (signature, htlc_signatures);
+
+               {
+                       let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
+                       for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
+                               htlcs.push(htlc);
+                       }
+
+                       let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+                       signature = res.0;
+                       htlc_signatures = res.1;
+
+                       log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
+                               encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
+                               &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
+                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+
+                       for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
+                               log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
+                                       encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+                                       encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
+                                       log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
+                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+                       }
+               }
+
+               Ok((msgs::CommitmentSigned {
+                       channel_id: self.context.channel_id,
+                       signature,
+                       htlc_signatures,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+               }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
+       }
+
+       /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+       /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
+       ///
+       /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see the docs
+       /// on [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
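+       ///
+       /// If this returns `Some(monitor_update)`, the update must be given to the chain monitor
+       /// for persistence before the resulting `commitment_signed` may be sent. A minimal sketch
+       /// (the `chan` binding and elided arguments are hypothetical):
+       ///
+       /// ```ignore
+       /// if let Some(monitor_update) = chan.send_htlc_and_commit(/* HTLC args */)? {
+       ///     // Persist `monitor_update` before releasing this channel's messages to the peer.
+       /// }
+       /// ```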
+       pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+               &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+               source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+               fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+       ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+       where F::Target: FeeEstimator, L::Target: Logger
+       {
+               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
+                       onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
+               if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+               match send_res? {
+                       Some(_) => {
+                               let monitor_update = self.build_commitment_no_status_check(logger);
+                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+                               Ok(self.push_ret_blockable_mon_update(monitor_update))
+                       },
+                       None => Ok(None)
+               }
+       }
+
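+       /// Applies a counterparty `channel_update`'s forwarding parameters to this channel, storing
+       /// them as [`CounterpartyForwardingInfo`] for later use (e.g. when building route hints).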
+       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
+               if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
+                       return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
+               }
+               self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+                       fee_base_msat: msg.contents.fee_base_msat,
+                       fee_proportional_millionths: msg.contents.fee_proportional_millionths,
+                       cltv_expiry_delta: msg.contents.cltv_expiry_delta
+               });
+
+               Ok(())
+       }
+
+       /// Begins the shutdown process, getting a message for the remote peer and returning all
+       /// holding cell HTLCs for payment failure.
+       ///
+       /// May jump to the channel being fully shut down (see [`Self::is_shutdown`]), in which case
+       /// no [`ChannelMonitorUpdate`] will be returned.
+       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+               target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
+       -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+       where SP::Target: SignerProvider {
+               for htlc in self.context.pending_outbound_htlcs.iter() {
+                       if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+                               return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
+                       }
+               }
+               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
+                       if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
+                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+                       }
+                       else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
+                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
+                       }
+               }
+               if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+                       return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+               }
+               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+               if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+                       return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
+               }
+
+               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+               // script is set; we just force-close and call it a day.
+               let mut chan_closed = false;
+               if self.context.channel_state < ChannelState::FundingSent as u32 {
+                       chan_closed = true;
+               }
+
+               let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+                       Some(_) => false,
+                       None if !chan_closed => {
+                               // use override shutdown script if provided
+                               let shutdown_scriptpubkey = match override_shutdown_script {
+                                       Some(script) => script,
+                                       None => {
+                                               // otherwise, use the shutdown scriptpubkey provided by the signer
+                                               match signer_provider.get_shutdown_scriptpubkey() {
+                                                       Ok(scriptpubkey) => scriptpubkey,
+                                                       Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+                                               }
+                                       },
+                               };
+                               if !shutdown_scriptpubkey.is_compatible(their_features) {
+                                       return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+                               }
+                               self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+                               true
+                       },
+                       None => false,
+               };
+
+               // From here on out, we may not fail!
+               self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
+               if self.context.channel_state < ChannelState::FundingSent as u32 {
+                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
+               } else {
+                       self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+               }
+               self.context.update_time_counter += 1;
+
+               let monitor_update = if update_shutdown_script {
+                       self.context.latest_monitor_update_id += 1;
+                       let monitor_update = ChannelMonitorUpdate {
+                               update_id: self.context.latest_monitor_update_id,
+                               updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+                                       scriptpubkey: self.get_closing_scriptpubkey(),
+                               }],
+                       };
+                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+                       self.push_ret_blockable_mon_update(monitor_update)
+               } else { None };
+               let shutdown = msgs::Shutdown {
+                       channel_id: self.context.channel_id,
+                       scriptpubkey: self.get_closing_scriptpubkey(),
+               };
+
+               // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
+               // our shutdown until we've committed all of the pending changes.
+               self.context.holding_cell_update_fee = None;
+               let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+               self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+                       match htlc_update {
+                               &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+                                       dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+                                       false
+                               },
+                               _ => true
+                       }
+               });
+
+               debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+                       "we can't both complete shutdown and return a monitor update");
+
+               Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+       }
+
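+       /// Returns the `HTLCSource`s and payment hashes of all outbound HTLCs in flight on this
+       /// channel, including those still sitting in the holding cell.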
+       pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
+               self.context.holding_cell_htlc_updates.iter()
+                       .flat_map(|htlc_update| {
+                               match htlc_update {
+                                       HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+                                               => Some((source, payment_hash)),
+                                       _ => None,
+                               }
+                       })
+                       .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
+       }
+}
+
+/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
+pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
+       pub context: ChannelContext<Signer>,
+}
+
+impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
+       pub fn new<ES: Deref, SP: Deref, F: Deref>(
+               fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+               channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
+               outbound_scid_alias: u64
+       ) -> Result<OutboundV1Channel<Signer>, APIError>
+       where ES::Target: EntropySource,
+             SP::Target: SignerProvider<Signer = Signer>,
+             F::Target: FeeEstimator,
+       {
+               let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+               let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+               let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+               let pubkeys = holder_signer.pubkeys().clone();
+
+               if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+                       return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+               }
+               if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+                       return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+               }
+               let channel_value_msat = channel_value_satoshis * 1000;
+               if push_msat > channel_value_msat {
+                       return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+               }
+               if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+                       return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+               }
+               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       // Protocol-level safety check in place; this should never happen because of
+                       // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+                       return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+               }
+
+               let channel_type = Self::get_initial_channel_type(&config, their_features);
+               debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+
+               let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+               let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+               let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+               if value_to_self_msat < commitment_tx_fee {
+                       return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+               }
+
+               let mut secp_ctx = Secp256k1::new();
+               secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+               let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+                       match signer_provider.get_shutdown_scriptpubkey() {
+                               Ok(scriptpubkey) => Some(scriptpubkey),
+                               Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+                       }
+               } else { None };
+
+               if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+                       if !shutdown_scriptpubkey.is_compatible(&their_features) {
+                               return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+                       }
+               }
+
+               let destination_script = match signer_provider.get_destination_script() {
+                       Ok(script) => script,
+                       Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+               };
+
+               let temporary_channel_id = entropy_source.get_secure_random_bytes();
+
+               Ok(Self {
+                       context: ChannelContext {
+                               user_id,
+
+                               config: LegacyChannelConfig {
+                                       options: config.channel_config.clone(),
+                                       announced_channel: config.channel_handshake_config.announced_channel,
+                                       commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+                               },
+
+                               prev_config: None,
+
+                               inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
+
+                               channel_id: temporary_channel_id,
+                               temporary_channel_id: Some(temporary_channel_id),
+                               channel_state: ChannelState::OurInitSent as u32,
+                               announcement_sigs_state: AnnouncementSigsState::NotSent,
+                               secp_ctx,
+                               channel_value_satoshis,
+
+                               latest_monitor_update_id: 0,
+
+                               holder_signer,
+                               shutdown_scriptpubkey,
+                               destination_script,
+
+                               cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                               cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                               value_to_self_msat,
+
+                               pending_inbound_htlcs: Vec::new(),
+                               pending_outbound_htlcs: Vec::new(),
+                               holding_cell_htlc_updates: Vec::new(),
+                               pending_update_fee: None,
+                               holding_cell_update_fee: None,
+                               next_holder_htlc_id: 0,
+                               next_counterparty_htlc_id: 0,
+                               update_time_counter: 1,
+
+                               resend_order: RAACommitmentOrder::CommitmentFirst,
+
+                               monitor_pending_channel_ready: false,
+                               monitor_pending_revoke_and_ack: false,
+                               monitor_pending_commitment_signed: false,
+                               monitor_pending_forwards: Vec::new(),
+                               monitor_pending_failures: Vec::new(),
+                               monitor_pending_finalized_fulfills: Vec::new(),
+
+                               #[cfg(debug_assertions)]
+                               holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+                               #[cfg(debug_assertions)]
+                               counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+
+                               last_sent_closing_fee: None,
+                               pending_counterparty_closing_signed: None,
+                               closing_fee_limits: None,
+                               target_closing_feerate_sats_per_kw: None,
+
+                               inbound_awaiting_accept: false,
+
+                               funding_tx_confirmed_in: None,
+                               funding_tx_confirmation_height: 0,
+                               short_channel_id: None,
+                               channel_creation_height: current_chain_height,
+
+                               feerate_per_kw: feerate,
+                               counterparty_dust_limit_satoshis: 0,
+                               holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+                               counterparty_max_htlc_value_in_flight_msat: 0,
+                               holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+                               counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
+                               holder_selected_channel_reserve_satoshis,
+                               counterparty_htlc_minimum_msat: 0,
+                               holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+                               counterparty_max_accepted_htlcs: 0,
+                               holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+                               minimum_depth: None, // Filled in in accept_channel
+
+                               counterparty_forwarding_info: None,
+
+                               channel_transaction_parameters: ChannelTransactionParameters {
+                                       holder_pubkeys: pubkeys,
+                                       holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+                                       is_outbound_from_holder: true,
+                                       counterparty_parameters: None,
+                                       funding_outpoint: None,
+                                       channel_type_features: channel_type.clone()
+                               },
+                               funding_transaction: None,
+
+                               counterparty_cur_commitment_point: None,
+                               counterparty_prev_commitment_point: None,
+                               counterparty_node_id,
+
+                               counterparty_shutdown_scriptpubkey: None,
+
+                               commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+                               channel_update_status: ChannelUpdateStatus::Enabled,
+                               closing_signed_in_flight: false,
+
+                               announcement_sigs: None,
+
+                               #[cfg(any(test, fuzzing))]
+                               next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+                               #[cfg(any(test, fuzzing))]
+                               next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+
+                               workaround_lnd_bug_4006: None,
+                               sent_message_awaiting_response: None,
+
+                               latest_inbound_scid_alias: None,
+                               outbound_scid_alias,
+
+                               channel_pending_event_emitted: false,
+                               channel_ready_event_emitted: false,
+
+                               #[cfg(any(test, fuzzing))]
+                               historical_inbound_htlc_fulfills: HashSet::new(),
+
+                               channel_type,
+                               channel_keys_id,
+
+                               blocked_monitor_updates: Vec::new(),
+                       }
+               })
+       }
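The final affordability check above can be illustrated in isolation. This is a hedged sketch rather than LDK's implementation: the 724 base and 172 per-HTLC weights are the BOLT-3 non-anchor commitment weights, and the HTLC count of 4 is an illustrative stand-in for `MIN_AFFORDABLE_HTLC_COUNT`.

// Sketch of the funder-affordability rule: the opener's initial balance
// (channel value minus push_msat) must cover the commitment tx fee for a
// minimum number of HTLCs. Constants below are illustrative assumptions.
fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64) -> u64 {
    const BASE_WEIGHT: u64 = 724; // BOLT-3 non-anchor commitment weight
    const WEIGHT_PER_HTLC: u64 = 172;
    // feerate is sats per 1000 weight units; floor to whole sats, express in msat
    feerate_per_kw as u64 * (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) / 1000 * 1000
}

fn funder_can_afford(channel_value_satoshis: u64, push_msat: u64, feerate_per_kw: u32) -> bool {
    let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
    value_to_self_msat >= commit_tx_fee_msat(feerate_per_kw, 4)
}

fn main() {
    // A 20k-sat channel at 253 sat/kW easily affords the initial commitment...
    assert!(funder_can_afford(20_000, 0, 253));
    // ...but pushing nearly the whole balance to the peer leaves nothing for fees.
    assert!(!funder_can_afford(20_000, 19_999_900, 253));
}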
+
+       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+       fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+               Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+       }
+
+       /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
+       /// a funding_created message for the remote peer.
+       /// Panics if called at some time other than immediately after initial handshake, if called twice,
+       /// or if called on an inbound channel.
+       /// Note that channel_id changes during this call!
+       /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
+       /// If an Err is returned, it is a ChannelError::Close.
+       pub fn get_outbound_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L)
+       -> Result<(Channel<Signer>, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger {
+               if !self.context.is_outbound() {
+                       panic!("Tried to create outbound funding_created message on an inbound channel!");
+               }
+               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+                       panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
+               }
+               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+               }
+
+               self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+               self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+
+               let signature = match self.get_outbound_funding_created_signature(logger) {
+                       Ok(res) => res,
+                       Err(e) => {
+                               log_error!(logger, "Got bad signatures: {:?}!", e);
+                               self.context.channel_transaction_parameters.funding_outpoint = None;
+                               return Err((self, e));
+                       }
+               };
+
+               let temporary_channel_id = self.context.channel_id;
+
+               // Now that we're past error-generating stuff, update our local state:
+
+               self.context.channel_state = ChannelState::FundingCreated as u32;
+               self.context.channel_id = funding_txo.to_channel_id();
+               self.context.funding_transaction = Some(funding_transaction);
+
+               let channel = Channel {
+                       context: self.context,
+               };
+
+               Ok((channel, msgs::FundingCreated {
+                       temporary_channel_id,
+                       funding_txid: funding_txo.txid,
+                       funding_output_index: funding_txo.index,
+                       signature,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+                       #[cfg(taproot)]
+                       next_local_nonce: None,
+               }))
+       }
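The ordering contract in the doc comment above (a `funding_created` may only be produced immediately after the handshake, and the funding transaction must not be broadcast until `funding_signed` arrives) can be captured as a small state machine. The sketch below uses hypothetical simplified types, not LDK's API:

#[derive(PartialEq, Debug)]
enum State { HandshakeComplete, FundingCreated, FundingSigned }

struct Chan { state: State, can_broadcast: bool }

impl Chan {
    fn get_outbound_funding_created(&mut self) -> Result<(), &'static str> {
        if self.state != State::HandshakeComplete {
            return Err("only valid immediately after initial handshake, and only once");
        }
        // In the real code the channel_id also switches to the funding outpoint here.
        self.state = State::FundingCreated;
        Ok(())
    }
    fn funding_signed(&mut self) {
        self.state = State::FundingSigned;
        self.can_broadcast = true; // broadcasting the funding tx is only safe from here on
    }
}

fn main() {
    let mut chan = Chan { state: State::HandshakeComplete, can_broadcast: false };
    chan.get_outbound_funding_created().unwrap();
    assert!(!chan.can_broadcast); // do NOT broadcast between funding_created and funding_signed
    chan.funding_signed();
    assert!(chan.can_broadcast);
    assert!(chan.get_outbound_funding_created().is_err()); // calling twice is an error
}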
+
+       fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+               // The default channel type (i.e. the first one we try) depends on whether the channel is
+               // public - if it is, we just go with `only_static_remote_key` as it's the only option
+               // available. If it's private, we first try `scid_privacy` as it provides better privacy
+               // with no other changes, and fall back to `only_static_remote_key` otherwise.
+               let mut ret = ChannelTypeFeatures::only_static_remote_key();
+               if !config.channel_handshake_config.announced_channel &&
+                       config.channel_handshake_config.negotiate_scid_privacy &&
+                       their_features.supports_scid_privacy() {
+                       ret.set_scid_privacy_required();
+               }
+
+               // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+               // set it now. If they don't understand it, we'll fall back to our default of
+               // `only_static_remotekey`.
+               if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+                       their_features.supports_anchors_zero_fee_htlc_tx() {
+                       ret.set_anchors_zero_fee_htlc_tx_required();
+               }
+
+               ret
+       }
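A standalone mirror of the selection logic above, using plain booleans instead of `ChannelTypeFeatures` (all names are illustrative): start from the implicit `static_remote_key` baseline, add `scid_privacy` only for unannounced channels when both our config and the peer allow it, then optionally add `anchors_zero_fee_htlc_tx`.

#[derive(Default, Debug)]
struct ChanType { scid_privacy: bool, anchors_zero_fee_htlc_tx: bool }

fn initial_channel_type(
    announced: bool, negotiate_scid_privacy: bool, negotiate_anchors: bool,
    peer_scid_privacy: bool, peer_anchors: bool,
) -> ChanType {
    let mut ty = ChanType::default(); // implicitly static_remote_key only
    if !announced && negotiate_scid_privacy && peer_scid_privacy {
        ty.scid_privacy = true;
    }
    if negotiate_anchors && peer_anchors {
        ty.anchors_zero_fee_htlc_tx = true;
    }
    ty
}

fn main() {
    // Private channel, both upgrades negotiated and supported by the peer.
    let ty = initial_channel_type(false, true, true, true, true);
    assert!(ty.scid_privacy && ty.anchors_zero_fee_htlc_tx);
    // Public channels never get scid_privacy, even if both sides support it.
    assert!(!initial_channel_type(true, true, false, true, false).scid_privacy);
}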
+
+       /// If we receive an error message, it may only be a rejection of the channel type we tried,
+       /// not of our ability to open any channel at all. Thus, on error, we should first call this
+       /// and see if we get a new `OpenChannel` message; otherwise, the channel should be failed.
+       pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
+               if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+               if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+                       // We've exhausted our options
+                       return Err(());
+               }
+               // We support opening a few different types of channels. Try removing our additional
+               // features one by one until we've either arrived at our default or the counterparty has
+               // accepted one.
+               //
+               // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+               // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+               // checks whether the counterparty supports every feature, this would only happen if the
+               // counterparty is advertising the feature, but rejecting channels proposing the feature for
+               // whatever reason.
+               if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
+                       assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
+               } else if self.context.channel_type.supports_scid_privacy() {
+                       self.context.channel_type.clear_scid_privacy();
+               } else {
+                       self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
+               }
+               self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
+               Ok(self.get_open_channel(chain_hash))
+       }
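The downgrade order is easiest to see in isolation: anchors are stripped first, then `scid_privacy`, and once only `static_remote_key` remains there is nothing left to retry. A simplified sketch with plain booleans (not LDK types):

// Each call drops one feature, mirroring one retried open_channel attempt.
fn downgrade(anchors: &mut bool, scid_privacy: &mut bool) -> Result<(), ()> {
    if *anchors { *anchors = false; Ok(()) }
    else if *scid_privacy { *scid_privacy = false; Ok(()) }
    else { Err(()) } // already at the static_remote_key baseline: fail the channel
}

fn main() {
    let (mut anchors, mut scid_privacy) = (true, true);
    assert!(downgrade(&mut anchors, &mut scid_privacy).is_ok()); // anchors dropped first
    assert!(downgrade(&mut anchors, &mut scid_privacy).is_ok()); // then scid_privacy
    assert!(downgrade(&mut anchors, &mut scid_privacy).is_err()); // out of options
}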
 
        pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
                if !self.context.is_outbound() {
@@ -5638,802 +5822,679 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                }
        }
 
-       pub fn inbound_is_awaiting_accept(&self) -> bool {
-               self.context.inbound_awaiting_accept
-       }
+       // Message handlers
+       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
+               let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
 
-       /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
-       pub fn set_0conf(&mut self) {
-               assert!(self.context.inbound_awaiting_accept);
-               self.context.minimum_depth = Some(0);
-       }
+               // Check sanity of message fields:
+               if !self.context.is_outbound() {
+                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
+               }
+               if self.context.channel_state != ChannelState::OurInitSent as u32 {
+                       return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
+               }
+               if msg.dust_limit_satoshis > 21000000 * 100000000 {
+                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
+               }
+               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
+               }
+               if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
+               }
+               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+                               msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
+               }
+               let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+               }
+               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if msg.to_self_delay > max_delay_acceptable {
+                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+               }
+               if msg.max_accepted_htlcs < 1 {
+                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+               }
+               if msg.max_accepted_htlcs > MAX_HTLCS {
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+               }
 
-       /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
-       /// should be sent back to the counterparty node.
-       ///
-       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-       pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
-               if self.context.is_outbound() {
-                       panic!("Tried to send accept_channel for an outbound channel?");
+               // Now check against optional parameters as set by config...
+               if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
                }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
-                       panic!("Tried to send accept_channel after channel had moved forward");
+               if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
                }
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Tried to send an accept_channel for a channel that has already advanced");
+               if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
                }
-               if !self.context.inbound_awaiting_accept {
-                       panic!("The inbound channel has already been accepted");
+               if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+               }
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if msg.minimum_depth > peer_limits.max_minimum_depth {
+                       return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Maximum allowed: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
                }
 
-               self.context.user_id = user_id;
-               self.context.inbound_awaiting_accept = false;
+               if let Some(ty) = &msg.channel_type {
+                       if *ty != self.context.channel_type {
+                               return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+                       }
+               } else if their_features.supports_channel_type() {
+                       // Assume they've accepted the channel type as they said they understand it.
+               } else {
+                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
+                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       }
+                       self.context.channel_type = channel_type.clone();
+                       self.context.channel_transaction_parameters.channel_type_features = channel_type;
+               }
 
-               self.generate_accept_channel_message()
-       }
+               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+                       match &msg.shutdown_scriptpubkey {
+                               &Some(ref script) => {
+                                       // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+                                       if script.len() == 0 {
+                                               None
+                                       } else {
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+                                               }
+                                               Some(script.clone())
+                                       }
+                               },
+                               // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
+                               &None => {
+                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
+                               }
+                       }
+               } else { None };
 
-       /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
-       /// inbound channel. If the intention is to accept an inbound channel, use
-       /// [`Channel::accept_inbound_channel`] instead.
-       ///
-       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-       fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
-               let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-               let keys = self.context.get_holder_pubkeys();
+               self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
+               self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+               self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
+               self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
+               self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
 
-               msgs::AcceptChannel {
-                       temporary_channel_id: self.context.channel_id,
-                       dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
-                       max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
-                       channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
-                       htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
-                       minimum_depth: self.context.minimum_depth.unwrap(),
-                       to_self_delay: self.context.get_holder_selected_contest_delay(),
-                       max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
-                       funding_pubkey: keys.funding_pubkey,
-                       revocation_basepoint: keys.revocation_basepoint,
-                       payment_point: keys.payment_point,
-                       delayed_payment_basepoint: keys.delayed_payment_basepoint,
-                       htlc_basepoint: keys.htlc_basepoint,
-                       first_per_commitment_point,
-                       shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
-                               Some(script) => script.clone().into_inner(),
-                               None => Builder::new().into_script(),
-                       }),
-                       channel_type: Some(self.context.channel_type.clone()),
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
+               if peer_limits.trust_own_funding_0conf {
+                       self.context.minimum_depth = Some(msg.minimum_depth);
+               } else {
+                       self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
                }
-       }
 
-       /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
-       /// inbound channel without accepting it.
-       ///
-       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
-       #[cfg(test)]
-       pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
-               self.generate_accept_channel_message()
-       }
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: msg.funding_pubkey,
+                       revocation_basepoint: msg.revocation_basepoint,
+                       payment_point: msg.payment_point,
+                       delayed_payment_basepoint: msg.delayed_payment_basepoint,
+                       htlc_basepoint: msg.htlc_basepoint
+               };
 
-       /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
-       fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
-               Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+               self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+                       selected_contest_delay: msg.to_self_delay,
+                       pubkeys: counterparty_pubkeys,
+               });
+
+               self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+               self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+               self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+               self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+               Ok(())
        }
+}
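The core reserve and dust relations validated in `accept_channel` can be exercised standalone. The sketch below models only a subset of the message fields, and every name is illustrative rather than LDK's API:

// Subset of accept_channel fields relevant to the reserve/dust sanity checks.
struct AcceptFields { dust_limit_sat: u64, channel_reserve_sat: u64, htlc_minimum_msat: u64 }

fn sanity(msg: &AcceptFields, channel_value_sat: u64, our_reserve_sat: u64) -> Result<(), String> {
    if msg.channel_reserve_sat > channel_value_sat {
        return Err("peer reserve exceeds the channel value".into());
    }
    if msg.dust_limit_sat > our_reserve_sat {
        return Err("peer dust limit exceeds the reserve we selected".into());
    }
    if msg.channel_reserve_sat > channel_value_sat - our_reserve_sat {
        return Err("peer reserve leaves no room for our own reserve".into());
    }
    // Whatever the peer can actually spend must exceed its htlc_minimum.
    let full_value_msat = (channel_value_sat - msg.channel_reserve_sat) * 1000;
    if msg.htlc_minimum_msat >= full_value_msat {
        return Err("htlc_minimum consumes the entire spendable value".into());
    }
    Ok(())
}

fn main() {
    let msg = AcceptFields { dust_limit_sat: 546, channel_reserve_sat: 1_000, htlc_minimum_msat: 1 };
    assert!(sanity(&msg, 100_000, 1_000).is_ok());
}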
 
-       /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
-       /// a funding_created message for the remote peer.
-       /// Panics if called at some time other than immediately after initial handshake, if called twice,
-       /// or if called on an inbound channel.
-       /// Note that channel_id changes during this call!
-       /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
-       /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
-               if !self.context.is_outbound() {
-                       panic!("Tried to create outbound funding_created message on an inbound channel!");
-               }
-               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
-                       panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
-               }
-               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
-                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
-                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
-               }
+/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
+pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
+       pub context: ChannelContext<Signer>,
+}
 
-               self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
-               self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
+       /// Creates a new channel from a remote side's request for one.
+       /// Assumes chain_hash has already been checked and corresponds with what we expect!
+       pub fn new<ES: Deref, SP: Deref, F: Deref, L: Deref>(
+               fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+               counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
+               their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
+               current_chain_height: u32, logger: &L, outbound_scid_alias: u64
+       ) -> Result<InboundV1Channel<Signer>, ChannelError>
+               where ES::Target: EntropySource,
+                         SP::Target: SignerProvider<Signer = Signer>,
+                         F::Target: FeeEstimator,
+                         L::Target: Logger,
+       {
+               let announced_channel = (msg.channel_flags & 1) == 1;
 
-               let signature = match self.get_outbound_funding_created_signature(logger) {
-                       Ok(res) => res,
-                       Err(e) => {
-                               log_error!(logger, "Got bad signatures: {:?}!", e);
-                               self.context.channel_transaction_parameters.funding_outpoint = None;
-                               return Err(e);
+               // First check the channel type is known, failing before we do anything else if we don't
+               // support this channel type.
+               let channel_type = if let Some(channel_type) = &msg.channel_type {
+                       if channel_type.supports_any_optional_bits() {
+                               return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
                        }
-               };
-
-               let temporary_channel_id = self.context.channel_id;
 
-               // Now that we're past error-generating stuff, update our local state:
+                       // We only support the channel types defined by the `ChannelManager` in
+                       // `provided_channel_type_features`. The channel type must always support
+                       // `static_remote_key`.
+                       if !channel_type.requires_static_remote_key() {
+                               return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+                       }
+                       // Make sure we support all of the features behind the channel type.
+                       if !channel_type.is_subset(our_supported_features) {
+                               return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+                       }
+                       if channel_type.requires_scid_privacy() && announced_channel {
+                               return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+                       }
+                       channel_type.clone()
+               } else {
+                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
+                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       }
+                       channel_type
+               };
 
-               self.context.channel_state = ChannelState::FundingCreated as u32;
-               self.context.channel_id = funding_txo.to_channel_id();
-               self.context.funding_transaction = Some(funding_transaction);
+               let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
+               let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
+               let pubkeys = holder_signer.pubkeys().clone();
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: msg.funding_pubkey,
+                       revocation_basepoint: msg.revocation_basepoint,
+                       payment_point: msg.payment_point,
+                       delayed_payment_basepoint: msg.delayed_payment_basepoint,
+                       htlc_basepoint: msg.htlc_basepoint
+               };
 
-               Ok(msgs::FundingCreated {
-                       temporary_channel_id,
-                       funding_txid: funding_txo.txid,
-                       funding_output_index: funding_txo.index,
-                       signature,
-                       #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
-               })
-       }
+               if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+                       return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be at least {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+               }
 
-       /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
-       /// announceable and available for use (have exchanged ChannelReady messages in both
-       /// directions). Should be used for both broadcasted announcements and in response to an
-       /// AnnouncementSignatures message from the remote peer.
-       ///
-       /// Will only fail if we're not in a state where channel_announcement may be sent (including
-       /// closing).
-       ///
-       /// This will only return ChannelError::Ignore upon failure.
-       fn get_channel_announcement<NS: Deref>(
-               &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
-       ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
-               if !self.context.config.announced_channel {
-                       return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+               // Check sanity of message fields:
+               if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+                       return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
                }
-               if !self.context.is_usable() {
-                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+               if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+                       return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
+               }
+               if msg.channel_reserve_satoshis > msg.funding_satoshis {
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
+               }
+               let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.push_msat > full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
                }
+               if msg.dust_limit_satoshis > msg.funding_satoshis {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
+               }
+               if msg.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+               }
+               Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
 
-               let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
-                       .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
-               let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
-               let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
+               let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if msg.to_self_delay > max_counterparty_selected_contest_delay {
+                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
+               }
+               if msg.max_accepted_htlcs < 1 {
+                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+               }
+               if msg.max_accepted_htlcs > MAX_HTLCS {
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+               }
 
-               let msg = msgs::UnsignedChannelAnnouncement {
-                       features: channelmanager::provided_channel_features(&user_config),
-                       chain_hash,
-                       short_channel_id: self.context.get_short_channel_id().unwrap(),
-                       node_id_1: if were_node_one { node_id } else { counterparty_node_id },
-                       node_id_2: if were_node_one { counterparty_node_id } else { node_id },
-                       bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
-                       bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
-                       excess_data: Vec::new(),
-               };
+               // Now check against optional parameters as set by config...
+               if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+                       return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+               }
+               if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+               }
+               if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+               }
+               if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+               }
+               if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+               }
+               if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
 
-               Ok(msg)
-       }
+               // Convert things into internal flags and prep our state:
 
-       fn get_announcement_sigs<NS: Deref, L: Deref>(
-               &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
-               best_block_height: u32, logger: &L
-       ) -> Option<msgs::AnnouncementSignatures>
-       where
-               NS::Target: NodeSigner,
-               L::Target: Logger
-       {
-               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
-                       return None;
+               if config.channel_handshake_limits.force_announced_channel_preference {
+                       if config.channel_handshake_config.announced_channel != announced_channel {
+                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+                       }
                }
 
-               if !self.context.is_usable() {
-                       return None;
+               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       // Protocol-level safety check in place; this should never happen because of
+                       // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}) msats. Channel value is ({} - {}) msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
+               }
+               if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
+                               msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+               }
+               if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
+                       return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
                }
 
-               if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
-                       log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
-                       return None;
+               // Check that the funder's amount for the initial commitment tx is sufficient
+               // for full fee payment plus a few HTLCs, to ensure the channel will be useful.
+               let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
+               let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
+               if funders_amount_msat / 1000 < commitment_tx_fee {
+                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
                }
 
-               if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
-                       return None;
+               let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
+               // While it's reasonable for us to not meet the channel reserve initially (if they don't
+               // want to push much to us), our counterparty should always have more than our reserve.
+               if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
                }
 
-               log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
-               let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
-                       Ok(a) => a,
-                       Err(e) => {
-                               log_trace!(logger, "{:?}", e);
-                               return None;
+               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+                       match &msg.shutdown_scriptpubkey {
+                               &Some(ref script) => {
+                                       // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+                                       if script.len() == 0 {
+                                               None
+                                       } else {
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+                                               }
+                                               Some(script.clone())
+                                       }
+                               },
+                               // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
+                               &None => {
+                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive any script. Use a 0-length script to opt out".to_owned()));
+                               }
                        }
-               };
-               let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
-                       Err(_) => {
-                               log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
-                               return None;
-                       },
-                       Ok(v) => v
-               };
-               let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
-                       Err(_) => {
-                               log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
-                               return None;
-                       },
-                       Ok(v) => v
-               };
-               self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
-
-               Some(msgs::AnnouncementSignatures {
-                       channel_id: self.context.channel_id(),
-                       short_channel_id: self.context.get_short_channel_id().unwrap(),
-                       node_signature: our_node_sig,
-                       bitcoin_signature: our_bitcoin_sig,
-               })
-       }
-
-       /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
-       /// available.
-       fn sign_channel_announcement<NS: Deref>(
-               &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
-       ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
-               if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
-                       let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
-                               .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
-                       let were_node_one = announcement.node_id_1 == our_node_key;
+               } else { None };
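For context on the `script::is_bolt2_compliant` check above: BOLT 2 limits upfront shutdown scripts to a few standard templates (plus, with `option_shutdown_anysegwit`, any OP_1 through OP_16 witness program). A rough standalone sketch of that template matching, with the caveat that the helper name here is made up and `ln::script` is the authoritative implementation:

    fn looks_like_bolt2_shutdown_script(s: &[u8]) -> bool {
        match s {
            // P2WPKH: OP_0 PUSH20 <20-byte key hash>
            [0x00, 0x14, rest @ ..] => rest.len() == 20,
            // P2WSH: OP_0 PUSH32 <32-byte script hash>
            [0x00, 0x20, rest @ ..] => rest.len() == 32,
            // P2PKH: OP_DUP OP_HASH160 PUSH20 <hash> OP_EQUALVERIFY OP_CHECKSIG
            [0x76, 0xa9, 0x14, rest @ ..] => rest.len() == 22 && rest[20..] == [0x88, 0xac],
            // P2SH: OP_HASH160 PUSH20 <hash> OP_EQUAL
            [0xa9, 0x14, rest @ ..] => rest.len() == 21 && rest[20] == 0x87,
            _ => false,
        }
    }
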
 
-                       let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
-                               .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
-                       let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
-                               .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
-                       Ok(msgs::ChannelAnnouncement {
-                               node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
-                               node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
-                               bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
-                               bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
-                               contents: announcement,
-                       })
-               } else {
-                       Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+               let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+                       match signer_provider.get_shutdown_scriptpubkey() {
+                               Ok(scriptpubkey) => Some(scriptpubkey),
+                               Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+                       }
+               } else { None };
+
+               if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+                       if !shutdown_scriptpubkey.is_compatible(&their_features) {
+                               return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+                       }
                }
-       }
 
-       /// Processes an incoming announcement_signatures message, providing a fully-signed
-       /// channel_announcement message which we can broadcast and storing our counterparty's
-       /// signatures for later reconstruction/rebroadcast of the channel_announcement.
-       pub fn announcement_signatures<NS: Deref>(
-               &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
-               msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
-       ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
-               let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+               let destination_script = match signer_provider.get_destination_script() {
+                       Ok(script) => script,
+                       Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+               };
 
-               let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+               let mut secp_ctx = Secp256k1::new();
+               secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 
-               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
-                       return Err(ChannelError::Close(format!(
-                               "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
-                                &announcement, self.context.get_counterparty_node_id())));
-               }
-               if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
-                       return Err(ChannelError::Close(format!(
-                               "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
-                               &announcement, self.context.counterparty_funding_pubkey())));
-               }
+               let chan = Self {
+                       context: ChannelContext {
+                               user_id,
 
-               self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
-               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
-                       return Err(ChannelError::Ignore(
-                               "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
-               }
+                               config: LegacyChannelConfig {
+                                       options: config.channel_config.clone(),
+                                       announced_channel,
+                                       commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+                               },
 
-               self.sign_channel_announcement(node_signer, announcement)
-       }
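The signature checks in `announcement_signatures` above, in isolation: gossip messages are signed over the double-SHA256 of the serialized announcement and verified with plain ECDSA. A minimal sketch using the `secp256k1` and `bitcoin::hashes` crates (the free function is illustrative, not an LDK API):

    use bitcoin::hashes::{sha256d, Hash};
    use secp256k1::{ecdsa::Signature, All, Message, PublicKey, Secp256k1};

    // `announcement_bytes` stands in for the encoded UnsignedChannelAnnouncement.
    fn announcement_sig_is_valid(
        secp: &Secp256k1<All>, announcement_bytes: &[u8], sig: &Signature, key: &PublicKey,
    ) -> bool {
        let msghash = Message::from_slice(&sha256d::Hash::hash(announcement_bytes)[..])
            .expect("sha256d output is always 32 bytes");
        secp.verify_ecdsa(&msghash, sig, key).is_ok()
    }
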
+                               prev_config: None,
 
-       /// Gets a signed channel_announcement for this channel, if we previously received an
-       /// announcement_signatures from our counterparty.
-       pub fn get_signed_channel_announcement<NS: Deref>(
-               &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
-       ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
-               if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
-                       return None;
-               }
-               let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
-                       Ok(res) => res,
-                       Err(_) => return None,
-               };
-               match self.sign_channel_announcement(node_signer, announcement) {
-                       Ok(res) => Some(res),
-                       Err(_) => None,
-               }
-       }
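Both functions above gate on `funding_tx_confirmation_height + 5 > best_block_height`, which is just "fewer than six confirmations": a transaction confirmed at height H has best_block_height - H + 1 confirmations. A worked example (function name illustrative):

    fn has_six_confirmations(funding_height: u32, best_block_height: u32) -> bool {
        // funding_height == 0 means unconfirmed; otherwise H + 5 <= best is
        // exactly best - H + 1 >= 6.
        funding_height != 0 && funding_height + 5 <= best_block_height
    }

    fn main() {
        assert!(has_six_confirmations(100, 105));  // blocks 100..=105: six confirmations
        assert!(!has_six_confirmations(100, 104)); // only five: keep waiting
    }
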
+                               inbound_handshake_limits_override: None,
 
-       /// May panic if called on a channel that wasn't immediately-previously
-       /// self.remove_uncommitted_htlcs_and_mark_paused()'d
-       pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
-               assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
-               assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
-               // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
-               // current to_remote balances. However, it no longer has any use, and thus is now simply
-               // set to a dummy (but valid, as required by the spec) public key.
-               // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
-               // branches; since we unwrap the result below, we arbitrarily select a dummy pubkey which
-               // is both valid and passes fuzzing mode's arbitrary validity criteria:
-               let mut pk = [2; 33]; pk[1] = 0xff;
-               let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
-               let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
-                       let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
-                       log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
-                       remote_last_secret
-               } else {
-                       log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
-                       [0;32]
-               };
-               self.mark_awaiting_response();
-               msgs::ChannelReestablish {
-                       channel_id: self.context.channel_id(),
-                       // The protocol has two different commitment number concepts - the "commitment
-                       // transaction number", which starts from 0 and counts up, and the "revocation key
-                       // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
-                       // commitment transaction numbers by the index which will be used to reveal the
-                       // revocation key for that commitment transaction, which means we have to convert them
-                       // to protocol-level commitment numbers here...
+                               temporary_channel_id: Some(msg.temporary_channel_id),
+                               channel_id: msg.temporary_channel_id,
+                               channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+                               announcement_sigs_state: AnnouncementSigsState::NotSent,
+                               secp_ctx,
 
-                       // next_local_commitment_number is the next commitment_signed number we expect to
-                       // receive (indicating if they need to resend one that we missed).
-                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
-                       // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
-                       // receive, however we track it by the next commitment number for a remote transaction
-                       // (which is one further, as they always revoke previous commitment transaction, not
-                       // the one we send) so we have to decrement by 1. Note that if
-                       // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
-                       // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
-                       // overflow here.
-                       next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
-                       your_last_per_commitment_secret: remote_last_secret,
-                       my_current_per_commitment_point: dummy_pubkey,
-                       // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
-                       // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
-                       // txid of that interactive transaction, else we MUST NOT set it.
-                       next_funding_txid: None,
-               }
-       }
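The two numbering schemes the comments above describe, as a self-contained worked example mirroring the arithmetic in `get_channel_reestablish`:

    const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

    fn main() {
        // We track the decrementing revocation-key index; the protocol wants
        // the incrementing commitment number, so convert by subtracting.
        let cur_holder = INITIAL_COMMITMENT_NUMBER - 2; // two local commitments so far
        assert_eq!(INITIAL_COMMITMENT_NUMBER - cur_holder, 2); // next_local_commitment_number

        // For the remote side we expect the revoke_and_ack for the *previous*
        // commitment transaction, hence the extra -1.
        let cur_counterparty = INITIAL_COMMITMENT_NUMBER - 3;
        assert_eq!(INITIAL_COMMITMENT_NUMBER - cur_counterparty - 1, 2); // next_remote_commitment_number
    }
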
+                               latest_monitor_update_id: 0,
 
+                               holder_signer,
+                               shutdown_scriptpubkey,
+                               destination_script,
 
-       // Send stuff to our remote peers:
+                               cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                               cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                               value_to_self_msat: msg.push_msat,
 
-       /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
-       /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
-       /// commitment update.
-       ///
-       /// `Err`s will only be [`ChannelError::Ignore`].
-       pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, logger: &L)
-       -> Result<(), ChannelError> where L::Target: Logger {
-               self
-                       .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
-                       .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
-                       .map_err(|err| {
-                               if let ChannelError::Ignore(_) = err { /* fine */ }
-                               else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
-                               err
-                       })
-       }
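The holding-cell pattern the docs above lean on, reduced to its essence (types here are illustrative, not LDK's): updates queue up while the channel can't commit, then drain in one batch so a single commitment update covers them all.

    use std::mem;

    struct HoldingCell<T> { queued: Vec<T> }

    impl<T> HoldingCell<T> {
        // Queue an update without putting anything on the wire yet.
        fn queue(&mut self, update: T) { self.queued.push(update); }

        // Once we're free to commit (RAA received, monitor update complete),
        // drain everything at once and build one commitment covering the batch.
        fn free(&mut self) -> Vec<T> { mem::take(&mut self.queued) }
    }
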
+                               pending_inbound_htlcs: Vec::new(),
+                               pending_outbound_htlcs: Vec::new(),
+                               holding_cell_htlc_updates: Vec::new(),
+                               pending_update_fee: None,
+                               holding_cell_update_fee: None,
+                               next_holder_htlc_id: 0,
+                               next_counterparty_htlc_id: 0,
+                               update_time_counter: 1,
 
-       /// Adds a pending outbound HTLC to this channel. Note that you probably want
-       /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
-       ///
-       /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
-       /// the wire:
-       /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
-       ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
-       ///   awaiting ACK.
-       /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
-       ///   we may not yet have sent the previous commitment update messages and will need to
-       ///   regenerate them.
-       ///
-       /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
-       /// on this [`Channel`] if `force_holding_cell` is false.
-       ///
-       /// `Err`s will only be [`ChannelError::Ignore`].
-       fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-               onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
-       -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
-               if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
-                       return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
-               }
-               let channel_total_msat = self.context.channel_value_satoshis * 1000;
-               if amount_msat > channel_total_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
-               }
+                               resend_order: RAACommitmentOrder::CommitmentFirst,
 
-               if amount_msat == 0 {
-                       return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
-               }
+                               monitor_pending_channel_ready: false,
+                               monitor_pending_revoke_and_ack: false,
+                               monitor_pending_commitment_signed: false,
+                               monitor_pending_forwards: Vec::new(),
+                               monitor_pending_failures: Vec::new(),
+                               monitor_pending_finalized_fulfills: Vec::new(),
 
-               let available_balances = self.get_available_balances();
-               if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
-                               available_balances.next_outbound_htlc_minimum_msat)));
-               }
+                               #[cfg(debug_assertions)]
+                               holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+                               #[cfg(debug_assertions)]
+                               counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 
-               if amount_msat > available_balances.next_outbound_htlc_limit_msat {
-                       return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
-                               available_balances.next_outbound_htlc_limit_msat)));
-               }
+                               last_sent_closing_fee: None,
+                               pending_counterparty_closing_signed: None,
+                               closing_fee_limits: None,
+                               target_closing_feerate_sats_per_kw: None,
 
-               if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
-                       // Note that this should never really happen: if we're !is_live() on receipt of an
-                       // incoming HTLC for relay we will reject the HTLC, and we won't allow the user to
-                       // send directly into a !is_live() channel. However, if we disconnected while the
-                       // previous hop was doing the commitment dance we may
-                       // end up getting here after the forwarding delay. In any case, returning an
-                       // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
-                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
-               }
+                               inbound_awaiting_accept: true,
 
-               let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
-               log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
-                       if force_holding_cell { "into holding cell" }
-                       else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
-                       else { "to peer" });
+                               funding_tx_confirmed_in: None,
+                               funding_tx_confirmation_height: 0,
+                               short_channel_id: None,
+                               channel_creation_height: current_chain_height,
 
-               if need_holding_cell {
-                       force_holding_cell = true;
-               }
+                               feerate_per_kw: msg.feerate_per_kw,
+                               channel_value_satoshis: msg.funding_satoshis,
+                               counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
+                               holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+                               counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
+                               holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
+                               counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
+                               holder_selected_channel_reserve_satoshis,
+                               counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
+                               holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+                               counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
+                               holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+                               minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
 
-               // Now update local state:
-               if force_holding_cell {
-                       self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
-                               amount_msat,
-                               payment_hash,
-                               cltv_expiry,
-                               source,
-                               onion_routing_packet,
-                       });
-                       return Ok(None);
-               }
+                               counterparty_forwarding_info: None,
+
+                               channel_transaction_parameters: ChannelTransactionParameters {
+                                       holder_pubkeys: pubkeys,
+                                       holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+                                       is_outbound_from_holder: false,
+                                       counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+                                               selected_contest_delay: msg.to_self_delay,
+                                               pubkeys: counterparty_pubkeys,
+                                       }),
+                                       funding_outpoint: None,
+                                       channel_type_features: channel_type.clone()
+                               },
+                               funding_transaction: None,
 
-               self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
-                       htlc_id: self.context.next_holder_htlc_id,
-                       amount_msat,
-                       payment_hash: payment_hash.clone(),
-                       cltv_expiry,
-                       state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
-                       source,
-               });
+                               counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
+                               counterparty_prev_commitment_point: None,
+                               counterparty_node_id,
 
-               let res = msgs::UpdateAddHTLC {
-                       channel_id: self.context.channel_id,
-                       htlc_id: self.context.next_holder_htlc_id,
-                       amount_msat,
-                       payment_hash,
-                       cltv_expiry,
-                       onion_routing_packet,
-               };
-               self.context.next_holder_htlc_id += 1;
+                               counterparty_shutdown_scriptpubkey,
 
-               Ok(Some(res))
-       }
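The amount validation `send_htlc` performs above, as a standalone sketch (struct and field names are illustrative; the real bounds come from `get_available_balances`):

    struct OutboundLimits { next_htlc_minimum_msat: u64, next_htlc_limit_msat: u64 }

    fn check_htlc_amount(amount_msat: u64, channel_value_satoshis: u64, limits: &OutboundLimits)
    -> Result<(), &'static str> {
        if amount_msat > channel_value_satoshis * 1000 { return Err("more than the channel's total value"); }
        if amount_msat == 0 { return Err("cannot send a 0-msat HTLC"); }
        if amount_msat < limits.next_htlc_minimum_msat { return Err("below the next-HTLC minimum"); }
        if amount_msat > limits.next_htlc_limit_msat { return Err("above the next-HTLC maximum"); }
        Ok(())
    }
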
+                               commitment_secrets: CounterpartyCommitmentSecrets::new(),
 
-       fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
-               log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
-               // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
-               // fail to generate the commitment, we are still at least at a point where upgrading
-               // their status is acceptable.
-               for htlc in self.context.pending_inbound_htlcs.iter_mut() {
-                       let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
-                               Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
-                       } else { None };
-                       if let Some(state) = new_state {
-                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
-                               htlc.state = state;
-                       }
-               }
-               for htlc in self.context.pending_outbound_htlcs.iter_mut() {
-                       if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
-                               log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
-                               // Grab the preimage, if it exists, instead of cloning
-                               let mut reason = OutboundHTLCOutcome::Success(None);
-                               mem::swap(outcome, &mut reason);
-                               htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
-                       }
-               }
-               if let Some((feerate, update_state)) = self.context.pending_update_fee {
-                       if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
-                               debug_assert!(!self.context.is_outbound());
-                               log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
-                               self.context.feerate_per_kw = feerate;
-                               self.context.pending_update_fee = None;
-                       }
-               }
-               self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
+                               channel_update_status: ChannelUpdateStatus::Enabled,
+                               closing_signed_in_flight: false,
 
-               let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
-               let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
-                       htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+                               announcement_sigs: None,
 
-               if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
-                       self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
-               }
+                               #[cfg(any(test, fuzzing))]
+                               next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+                               #[cfg(any(test, fuzzing))]
+                               next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 
-               self.context.latest_monitor_update_id += 1;
-               let monitor_update = ChannelMonitorUpdate {
-                       update_id: self.context.latest_monitor_update_id,
-                       updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
-                               commitment_txid: counterparty_commitment_txid,
-                               htlc_outputs: htlcs.clone(),
-                               commitment_number: self.context.cur_counterparty_commitment_transaction_number,
-                               their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
-                       }]
-               };
-               self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
-               monitor_update
-       }
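The monitor-update bookkeeping above in miniature: every state change bumps `latest_monitor_update_id`, and the resulting update carries that id so a `ChannelMonitor` can reject stale or out-of-order updates (structs illustrative):

    struct MonitorUpdate { update_id: u64 }

    struct ChanState { latest_monitor_update_id: u64 }

    impl ChanState {
        fn next_monitor_update(&mut self) -> MonitorUpdate {
            // Monotonically increasing ids let the monitor detect gaps and replays.
            self.latest_monitor_update_id += 1;
            MonitorUpdate { update_id: self.latest_monitor_update_id }
        }
    }
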
+                               workaround_lnd_bug_4006: None,
+                               sent_message_awaiting_response: None,
 
-       fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
-               let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+                               latest_inbound_scid_alias: None,
+                               outbound_scid_alias,
 
-               #[cfg(any(test, fuzzing))]
-               {
-                       if !self.context.is_outbound() {
-                               let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
-                               *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
-                               if let Some(info) = projected_commit_tx_info {
-                                       let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
-                                       if info.total_pending_htlcs == total_pending_htlcs
-                                               && info.next_holder_htlc_id == self.context.next_holder_htlc_id
-                                               && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
-                                               && info.feerate == self.context.feerate_per_kw {
-                                                       let actual_fee = Self::commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
-                                                       assert_eq!(actual_fee, info.fee);
-                                               }
-                               }
+                               channel_pending_event_emitted: false,
+                               channel_ready_event_emitted: false,
+
+                               #[cfg(any(test, fuzzing))]
+                               historical_inbound_htlc_fulfills: HashSet::new(),
+
+                               channel_type,
+                               channel_keys_id,
+
+                               blocked_monitor_updates: Vec::new(),
                        }
-               }
+               };
 
-               (counterparty_commitment_txid, commitment_stats.htlcs_included)
+               Ok(chan)
        }
 
-       /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
-       /// generation when we shouldn't change HTLC/channel state.
-       fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
-               // Trigger the fee-consistency assertions in `build_commitment_no_state_update`
-               #[cfg(any(test, fuzzing))]
-               self.build_commitment_no_state_update(logger);
+       pub fn is_awaiting_accept(&self) -> bool {
+               self.context.inbound_awaiting_accept
+       }
 
-               let counterparty_keys = self.context.build_remote_transaction_keys();
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
-               let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
-               let (signature, htlc_signatures);
+       /// Sets this channel up to accept 0conf; must be called before `get_accept_channel`
+       pub fn set_0conf(&mut self) {
+               assert!(self.context.inbound_awaiting_accept);
+               self.context.minimum_depth = Some(0);
+       }
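A miniature model of the ordering invariant `set_0conf` asserts: it must run while the channel is still awaiting acceptance, because the `minimum_depth` it zeroes out is advertised inside the accept_channel message (model types illustrative):

    struct Model { awaiting_accept: bool, minimum_depth: u32 }

    impl Model {
        fn set_0conf(&mut self) {
            assert!(self.awaiting_accept, "too late: accept_channel already built");
            self.minimum_depth = 0;
        }
        fn accept(&mut self) -> u32 {
            self.awaiting_accept = false;
            self.minimum_depth // this value goes out in accept_channel
        }
    }

    fn main() {
        let mut m = Model { awaiting_accept: true, minimum_depth: 3 };
        m.set_0conf();
        assert_eq!(m.accept(), 0); // counterparty sees minimum_depth == 0
    }
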
 
-               {
-                       let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
-                       for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
-                               htlcs.push(htlc);
-                       }
+       /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
+       /// should be sent back to the counterparty node.
+       ///
+       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+       pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
+               if self.context.is_outbound() {
+                       panic!("Tried to send accept_channel for an outbound channel?");
+               }
+               if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+                       panic!("Tried to send accept_channel after channel had moved forward");
+               }
+               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Tried to send an accept_channel for a channel that has already advanced");
+               }
+               if !self.context.inbound_awaiting_accept {
+                       panic!("The inbound channel has already been accepted");
+               }
 
-                       let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
-                       signature = res.0;
-                       htlc_signatures = res.1;
+               self.context.user_id = user_id;
+               self.context.inbound_awaiting_accept = false;
 
-                       log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
-                               encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
-                               &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
-                               log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+               self.generate_accept_channel_message()
+       }
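At the `ChannelManager` level this is driven by the manual-acceptance flow: with `manually_accept_inbound_channels` set in the `UserConfig`, an `Event::OpenChannelRequest` surfaces and the user answers through `ChannelManager::accept_inbound_channel`. A sketch of the event-handler side, assuming roughly the LDK 0.0.115-era API (the closure stands in for the call on a concrete `ChannelManager`):

    use lightning::events::Event;

    fn on_event(event: Event, accept: impl Fn(&[u8; 32], &bitcoin::secp256k1::PublicKey, u128)) {
        if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
            // The u128 is an arbitrary user_channel_id used to recognize this
            // channel in later events.
            accept(&temporary_channel_id, &counterparty_node_id, 42);
        }
    }
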
 
-                       for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
-                               log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
-                                       encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
-                                       encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
-                                       log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
-                                       log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
-                       }
-               }
+       /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
+       /// inbound channel. If the intention is to accept an inbound channel, use
+       /// [`InboundV1Channel::accept_inbound_channel`] instead.
+       ///
+       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+       fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
+               let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+               let keys = self.context.get_holder_pubkeys();
 
-               Ok((msgs::CommitmentSigned {
-                       channel_id: self.context.channel_id,
-                       signature,
-                       htlc_signatures,
+               msgs::AcceptChannel {
+                       temporary_channel_id: self.context.channel_id,
+                       dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+                       max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+                       channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+                       htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+                       minimum_depth: self.context.minimum_depth.unwrap(),
+                       to_self_delay: self.context.get_holder_selected_contest_delay(),
+                       max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+                       funding_pubkey: keys.funding_pubkey,
+                       revocation_basepoint: keys.revocation_basepoint,
+                       payment_point: keys.payment_point,
+                       delayed_payment_basepoint: keys.delayed_payment_basepoint,
+                       htlc_basepoint: keys.htlc_basepoint,
+                       first_per_commitment_point,
+                       shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+                               Some(script) => script.clone().into_inner(),
+                               None => Builder::new().into_script(),
+                       }),
+                       channel_type: Some(self.context.channel_type.clone()),
                        #[cfg(taproot)]
-                       partial_signature_with_nonce: None,
-               }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
+                       next_local_nonce: None,
+               }
        }
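The `None => Builder::new().into_script()` arm above is the wire encoding of "no upfront script": once upfront_shutdown is negotiated, a zero-length script is the defined opt-out (mirroring the 0-length check on the open_channel side earlier). In isolation, with the same `bitcoin` Builder the code uses:

    use bitcoin::blockdata::script::Builder;

    fn main() {
        let opt_out = Builder::new().into_script();
        // A zero-length shutdown_scriptpubkey means "we commit to no script".
        assert!(opt_out.is_empty());
    }
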
 
-       /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
-       /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
+       /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound channel
+       /// without accepting it.
        ///
-       /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
-       /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
-       pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
-               let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
-               if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
-               match send_res? {
-                       Some(_) => {
-                               let monitor_update = self.build_commitment_no_status_check(logger);
-                               self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
-                               Ok(self.push_ret_blockable_mon_update(monitor_update))
-                       },
-                       None => Ok(None)
-               }
+       /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+       #[cfg(test)]
+       pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+               self.generate_accept_channel_message()
        }
 
-       pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
-               if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
+       fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
+               let funding_script = self.context.get_funding_redeemscript();
+
+               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+               {
+                       let trusted_tx = initial_commitment_tx.trust();
+                       let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+                       let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+                       // They sign the holder commitment transaction...
+                       log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
+                               log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
+                               encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
+                               encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+                       secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
                }
-               self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
-                       fee_base_msat: msg.contents.fee_base_msat,
-                       fee_proportional_millionths: msg.contents.fee_proportional_millionths,
-                       cltv_expiry_delta: msg.contents.cltv_expiry_delta
-               });
 
-               Ok(())
+               let counterparty_keys = self.context.build_remote_transaction_keys();
+               let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+
+               let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+               let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+               log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+                       log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+               let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
+
+               // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
+               Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
        }
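The `CounterpartyForwardingInfo` stored by `channel_update` above feeds the BOLT 7 forwarding fee formula: forwarding `amt` msat over the channel costs fee_base_msat + amt * fee_proportional_millionths / 1_000_000. A worked example (function name illustrative):

    fn forwarding_fee_msat(amt_msat: u64, fee_base_msat: u32, fee_proportional_millionths: u32) -> u64 {
        fee_base_msat as u64 + amt_msat * fee_proportional_millionths as u64 / 1_000_000
    }

    fn main() {
        // base 1_000 msat + 100 ppm on a 2_000_000 msat payment = 1_200 msat
        assert_eq!(forwarding_fee_msat(2_000_000, 1_000, 100), 1_200);
    }
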
 
-       /// Begins the shutdown process, getting a message for the remote peer and returning all
-       /// holding cell HTLCs for payment failure.
-       ///
-       /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case
-       /// no [`ChannelMonitorUpdate`] will be returned.
-       pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
-               target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-       -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
-       where SP::Target: SignerProvider {
-               for htlc in self.context.pending_outbound_htlcs.iter() {
-                       if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
-                               return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
-                       }
+       pub fn funding_created<SP: Deref, L: Deref>(
+               mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
+       ) -> Result<(Channel<Signer>, msgs::FundingSigned, ChannelMonitor<Signer>), (Self, ChannelError)>
+       where
+               SP::Target: SignerProvider<Signer = Signer>,
+               L::Target: Logger
+       {
+               if self.context.is_outbound() {
+                       return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
                }
-               if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
-                       if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
-                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
-                       }
-                       else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
-                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
-                       }
+               if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+                       // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
+                       // remember the channel, so it's safe to just send an error_message here and drop the
+                       // channel.
+                       return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
                }
-               if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
-                       return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+               if self.context.inbound_awaiting_accept {
+                       return Err((self, ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned())));
                }
-               assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-               if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
-                       return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
+               if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+                               self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
-               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
-               // script is set; we just force-close and call it a day.
-               let mut chan_closed = false;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       chan_closed = true;
-               }
+               let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
+               self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+               // This is an externally observable change before we finish all our checks.  In particular
+               // funding_created_signature may fail.
+               self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
 
-               let update_shutdown_script = match self.context.shutdown_scriptpubkey {
-                       Some(_) => false,
-                       None if !chan_closed => {
-                               // use override shutdown script if provided
-                               let shutdown_scriptpubkey = match override_shutdown_script {
-                                       Some(script) => script,
-                                       None => {
-                                               // otherwise, use the shutdown scriptpubkey provided by the signer
-                                               match signer_provider.get_shutdown_scriptpubkey() {
-                                                       Ok(scriptpubkey) => scriptpubkey,
-                                                       Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
-                                               }
-                                       },
-                               };
-                               if !shutdown_scriptpubkey.is_compatible(their_features) {
-                                       return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
-                               }
-                               self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
-                               true
+               let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+                       Ok(res) => res,
+                       Err(ChannelError::Close(e)) => {
+                               self.context.channel_transaction_parameters.funding_outpoint = None;
+                               return Err((self, ChannelError::Close(e)));
                        },
-                       None => false,
+                       Err(e) => {
+                               // The only error we know how to handle is ChannelError::Close, so we fall over here
+                               // to make sure we don't continue with an inconsistent state.
+                               panic!("unexpected error type from funding_created_signature {:?}", e);
+                       }
                };
 
-               // From here on out, we may not fail!
-               self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
-               if self.context.channel_state < ChannelState::FundingSent as u32 {
-                       self.context.channel_state = ChannelState::ShutdownComplete as u32;
-               } else {
-                       self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
-               }
-               self.context.update_time_counter += 1;
+               let holder_commitment_tx = HolderCommitmentTransaction::new(
+                       initial_commitment_tx,
+                       msg.signature,
+                       Vec::new(),
+                       &self.context.get_holder_pubkeys().funding_pubkey,
+                       self.context.counterparty_funding_pubkey()
+               );
 
-               let monitor_update = if update_shutdown_script {
-                       self.context.latest_monitor_update_id += 1;
-                       let monitor_update = ChannelMonitorUpdate {
-                               update_id: self.context.latest_monitor_update_id,
-                               updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
-                                       scriptpubkey: self.get_closing_scriptpubkey(),
-                               }],
-                       };
-                       self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-                       if self.push_blockable_mon_update(monitor_update) {
-                               self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-                       } else { None }
-               } else { None };
-               let shutdown = msgs::Shutdown {
-                       channel_id: self.context.channel_id,
-                       scriptpubkey: self.get_closing_scriptpubkey(),
-               };
+               if let Err(_) = self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
+                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+               }
 
-               // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
-               // our shutdown until we've committed all of the pending changes.
-               self.context.holding_cell_update_fee = None;
-               let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
-               self.context.holding_cell_htlc_updates.retain(|htlc_update| {
-                       match htlc_update {
-                               &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
-                                       dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
-                                       false
-                               },
-                               _ => true
-                       }
-               });
+               // Now that we're past error-generating stuff, update our local state:
 
-               debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
-                       "we can't both complete shutdown and return a monitor update");
+               let funding_redeemscript = self.context.get_funding_redeemscript();
+               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+               let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+               let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+               monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+               let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+                                                         shutdown_script, self.context.get_holder_selected_contest_delay(),
+                                                         &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
+                                                         &self.context.channel_transaction_parameters,
+                                                         funding_redeemscript.clone(), self.context.channel_value_satoshis,
+                                                         obscure_factor,
+                                                         holder_commitment_tx, best_block, self.context.counterparty_node_id);
 
-               Ok((shutdown, monitor_update, dropped_outbound_htlcs))
-       }
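A note on the third return value above: each holding-cell HTLC that `get_shutdown` drops must be failed backwards promptly, so upstream hops can retry over another path instead of waiting out the CLTV. The caller-side loop, sketched generically (names illustrative):

    fn fail_dropped_htlcs<S, H>(dropped: Vec<(S, H)>, mut fail_backwards: impl FnMut(S, H)) {
        for (source, payment_hash) in dropped {
            // Fail each dropped HTLC back immediately rather than letting it time out.
            fail_backwards(source, payment_hash);
        }
    }
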
+               channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
 
-       /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
-       /// shutdown of this channel - no more calls into this Channel may be made afterwards except
-       /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
-       /// Also returns the list of payment_hashes for channels which we can safely fail backwards
-       /// immediately (others we will have to allow to time out).
-       pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
-               // Note that we MUST only generate a monitor update that indicates force-closure - we're
-               // called during initialization prior to the chain_monitor in the encompassing ChannelManager
-               // being fully configured in some cases. Thus, it's likely any monitor events we generate will
-               // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
-               assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
+               self.context.channel_state = ChannelState::FundingSent as u32;
+               self.context.channel_id = funding_txo.to_channel_id();
+               self.context.cur_counterparty_commitment_transaction_number -= 1;
+               self.context.cur_holder_commitment_transaction_number -= 1;
 
-               // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
-               // return them to fail the payment.
-               let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
-               let counterparty_node_id = self.context.get_counterparty_node_id();
-               for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
-                       match htlc_update {
-                               HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
-                                       dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
-                               },
-                               _ => {}
-                       }
-               }
-               let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
-                       // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
-                       // returning a channel monitor update here would imply a channel monitor update before
-                       // we even registered the channel monitor to begin with, which is invalid.
-                       // Thus, if we aren't actually at a point where we could conceivably broadcast the
-                       // funding transaction, don't return a funding txo (which prevents providing the
-                       // monitor update to the user, even if we return one).
-                       // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
-                               self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-                               Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
-                                       update_id: self.context.latest_monitor_update_id,
-                                       updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
-                               }))
-                       } else { None }
-               } else { None };
+               log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
 
-               self.context.channel_state = ChannelState::ShutdownComplete as u32;
-               self.context.update_time_counter += 1;
-               (monitor_update, dropped_outbound_htlcs)
-       }
+               // Promote the channel to a full-fledged one now that we have updated the state and have a
+               // `ChannelMonitor`.
+               let mut channel = Channel {
+                       context: self.context,
+               };
+               let channel_id = channel.context.channel_id.clone();
+               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
-       pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
-               self.context.holding_cell_htlc_updates.iter()
-                       .flat_map(|htlc_update| {
-                               match htlc_update {
-                                       HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
-                                               => Some((source, payment_hash)),
-                                       _ => None,
-                               }
-                       })
-                       .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
+               Ok((channel, msgs::FundingSigned {
+                       channel_id,
+                       signature,
+                       #[cfg(taproot)]
+                       partial_signature_with_nonce: None,
+               }, channel_monitor))
        }
 }
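
For reference, the `get_commitment_transaction_number_obscure_factor` call above implements BOLT 3's obscuring scheme: the factor is the lower 48 bits of SHA256 over the two payment basepoints (funder's first), and it is XORed into the commitment number before that number is spread across the commitment transaction's locktime and sequence fields. A minimal standalone sketch using the `bitcoin` crate's hash types, with the serialized basepoints passed as plain 33-byte arrays:

    use bitcoin::hashes::{sha256, Hash, HashEngine};

    /// Lower 48 bits of SHA256(funder_payment_basepoint || fundee_payment_basepoint),
    /// per BOLT 3. Callers XOR this with the commitment number to obscure it on-chain.
    fn obscure_factor(funder_basepoint: &[u8; 33], fundee_basepoint: &[u8; 33]) -> u64 {
        let mut engine = sha256::Hash::engine();
        engine.input(funder_basepoint);
        engine.input(fundee_basepoint);
        let res = sha256::Hash::from_engine(engine).into_inner();
        // Take the last six bytes of the digest as a big-endian 48-bit value.
        ((res[26] as u64) << 40) | ((res[27] as u64) << 32) | ((res[28] as u64) << 24)
            | ((res[29] as u64) << 16) | ((res[30] as u64) << 8) | (res[31] as u64)
    }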
 
@@ -6573,9 +6634,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                }
 
                let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
+               let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
 
                (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
-               for htlc in self.context.pending_outbound_htlcs.iter() {
+               for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
                        htlc.htlc_id.write(writer)?;
                        htlc.amount_msat.write(writer)?;
                        htlc.cltv_expiry.write(writer)?;
@@ -6611,18 +6673,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                                        reason.write(writer)?;
                                }
                        }
+                       if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
+                               if pending_outbound_skimmed_fees.is_empty() {
+                                       for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
+                               }
+                               pending_outbound_skimmed_fees.push(Some(skimmed_fee));
+                       } else if !pending_outbound_skimmed_fees.is_empty() {
+                               pending_outbound_skimmed_fees.push(None);
+                       }
                }
 
+               let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
                (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
-               for update in self.context.holding_cell_htlc_updates.iter() {
+               for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
                        match update {
-                               &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
+                               &HTLCUpdateAwaitingACK::AddHTLC {
+                                       ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+                                       skimmed_fee_msat,
+                               } => {
                                        0u8.write(writer)?;
                                        amount_msat.write(writer)?;
                                        cltv_expiry.write(writer)?;
                                        payment_hash.write(writer)?;
                                        source.write(writer)?;
                                        onion_routing_packet.write(writer)?;
+
+                                       if let Some(skimmed_fee) = skimmed_fee_msat {
+                                               if holding_cell_skimmed_fees.is_empty() {
+                                                       for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
+                                               }
+                                               holding_cell_skimmed_fees.push(Some(skimmed_fee));
+                                       } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
                                },
                                &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
                                        1u8.write(writer)?;
@@ -6775,10 +6856,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                        (5, self.context.config, required),
                        (6, serialized_holder_htlc_max_in_flight, option),
                        (7, self.context.shutdown_scriptpubkey, option),
+                       (8, self.context.blocked_monitor_updates, optional_vec),
                        (9, self.context.target_closing_feerate_sats_per_kw, option),
-                       (11, self.context.monitor_pending_finalized_fulfills, vec_type),
+                       (11, self.context.monitor_pending_finalized_fulfills, required_vec),
                        (13, self.context.channel_creation_height, required),
-                       (15, preimages, vec_type),
+                       (15, preimages, required_vec),
                        (17, self.context.announcement_sigs_state, required),
                        (19, self.context.latest_inbound_scid_alias, option),
                        (21, self.context.outbound_scid_alias, required),
@@ -6788,7 +6870,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
                        (28, holder_max_accepted_htlcs, option),
                        (29, self.context.temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, self.context.pending_monitor_updates, vec_type),
+                       (35, pending_outbound_skimmed_fees, optional_vec),
+                       (37, holding_cell_skimmed_fees, optional_vec),
                });
 
                Ok(())
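
The two skimmed-fee vectors built above follow a lazy backfill pattern: nothing is collected until the first `Some` appears, at which point the vector is padded with `None` up to the current index, keeping it positionally aligned with the HTLC list while letting a channel with no skimmed fees write an empty (omittable) TLV. A sketch of the same idea detached from the serialization machinery, with hypothetical names:

    /// Collect per-item optional values such that the result stays empty when no
    /// item carries a value, but is index-aligned with `items` as soon as one does.
    fn collect_sparse<T: Copy>(items: &[Option<T>]) -> Vec<Option<T>> {
        let mut out: Vec<Option<T>> = Vec::new();
        for (idx, item) in items.iter().enumerate() {
            if let Some(val) = item {
                if out.is_empty() {
                    // First value seen: backfill `None` for all earlier indices.
                    for _ in 0..idx { out.push(None); }
                }
                out.push(Some(*val));
            } else if !out.is_empty() {
                out.push(None);
            }
        }
        out
    }

    // collect_sparse(&[None, Some(5u64), None]) == vec![None, Some(5), None]
    // collect_sparse::<u64>(&[None, None]) == vec![]  (vector omitted from the TLV stream)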
@@ -6899,6 +6982,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        },
                                        _ => return Err(DecodeError::InvalidValue),
                                },
+                               skimmed_fee_msat: None,
                        });
                }
 
@@ -6912,6 +6996,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                        payment_hash: Readable::read(reader)?,
                                        source: Readable::read(reader)?,
                                        onion_routing_packet: Readable::read(reader)?,
+                                       skimmed_fee_msat: None,
                                },
                                1 => HTLCUpdateAwaitingACK::ClaimHTLC {
                                        payment_preimage: Readable::read(reader)?,
@@ -7008,7 +7093,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        _ => return Err(DecodeError::InvalidValue),
                };
 
-               let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
+               let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
                let funding_transaction = Readable::read(reader)?;
 
                let counterparty_cur_commitment_point = Readable::read(reader)?;
@@ -7065,7 +7150,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut temporary_channel_id: Option<[u8; 32]> = None;
                let mut holder_max_accepted_htlcs: Option<u16> = None;
 
-               let mut pending_monitor_updates = Some(Vec::new());
+               let mut blocked_monitor_updates = Some(Vec::new());
+
+               let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+               let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
@@ -7076,10 +7164,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
                        (6, holder_max_htlc_value_in_flight_msat, option),
                        (7, shutdown_scriptpubkey, option),
+                       (8, blocked_monitor_updates, optional_vec),
                        (9, target_closing_feerate_sats_per_kw, option),
-                       (11, monitor_pending_finalized_fulfills, vec_type),
+                       (11, monitor_pending_finalized_fulfills, optional_vec),
                        (13, channel_creation_height, option),
-                       (15, preimages_opt, vec_type),
+                       (15, preimages_opt, optional_vec),
                        (17, announcement_sigs_state, option),
                        (19, latest_inbound_scid_alias, option),
                        (21, outbound_scid_alias, option),
@@ -7089,7 +7178,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (28, holder_max_accepted_htlcs, option),
                        (29, temporary_channel_id, option),
                        (31, channel_pending_event_emitted, option),
-                       (33, pending_monitor_updates, vec_type),
+                       (35, pending_outbound_skimmed_fees_opt, optional_vec),
+                       (37, holding_cell_skimmed_fees_opt, optional_vec),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7134,6 +7224,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        return Err(DecodeError::UnknownRequiredFeature);
                }
 
+               // ChannelTransactionParameters may have had an empty features set upon deserialization.
+               // To account for that, we're proactively setting/overriding the field here.
+               channel_parameters.channel_type_features = chan_features.clone();
+
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 
@@ -7144,6 +7238,25 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
 
+               if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
+                       let mut iter = skimmed_fees.into_iter();
+                       for htlc in pending_outbound_htlcs.iter_mut() {
+                               htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+                       }
+                       // We expect all skimmed fees to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+               if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
+                       let mut iter = skimmed_fees.into_iter();
+                       for htlc in holding_cell_htlc_updates.iter_mut() {
+                               if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
+                                       *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+                               }
+                       }
+                       // We expect all skimmed fees to be consumed above
+                       if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+               }
+
                Ok(Channel {
                        context: ChannelContext {
                                user_id,
@@ -7261,7 +7374,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
 
-                               pending_monitor_updates: pending_monitor_updates.unwrap(),
+                               blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        }
                })
        }
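
On the read side, the mirror-image logic above re-attaches each optional vector by walking the HTLC list in lockstep and treats any length mismatch as corruption. A condensed sketch of that contract (the real code returns `DecodeError::InvalidValue`; the error type here is a placeholder):

    /// Re-apply per-HTLC optional values read from a TLV. `None` means the field
    /// was absent (older serialization); a present vector must match the HTLC
    /// count exactly, in both directions, or deserialization fails.
    fn apply_sparse<T>(values: Option<Vec<Option<T>>>, slots: &mut [Option<T>]) -> Result<(), ()> {
        if let Some(values) = values {
            let mut iter = values.into_iter();
            for slot in slots.iter_mut() {
                *slot = iter.next().ok_or(())?; // vector too short: corrupt
            }
            if iter.next().is_some() { return Err(()); } // vector too long: corrupt
        }
        Ok(())
    }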
@@ -7278,9 +7391,8 @@ mod tests {
        use hex;
        use crate::ln::PaymentHash;
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
-       #[cfg(anchors)]
        use crate::ln::channel::InitFeatures;
-       use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
+       use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
        use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
        use crate::ln::features::ChannelTypeFeatures;
        use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
@@ -7368,7 +7480,7 @@ mod tests {
                }
        }
 
-       #[cfg(not(feature = "grind_signatures"))]
+       #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
                PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
        }
@@ -7389,7 +7501,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               match Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
+               match OutboundV1Channel::<EnforcingSigner>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
                        Err(APIError::IncompatibleShutdownScript { script }) => {
                                assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
                        },
@@ -7412,7 +7524,7 @@ mod tests {
 
                let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
 
                // Now change the fee so we can check that the fee in the open_channel message is the
                // same as the old fee.
@@ -7431,6 +7543,7 @@ mod tests {
                let network = Network::Testnet;
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
                let logger = test_utils::TestLogger::new();
+               let best_block = BestBlock::from_network(network);
 
                // Go through the flow of opening a channel between two nodes, making sure
                // they have different dust limits.
@@ -7438,13 +7551,13 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
                let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-               let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+               let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
                let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
@@ -7452,6 +7565,18 @@ mod tests {
                node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
                node_a_chan.context.holder_dust_limit_satoshis = 1560;
 
+               // Node A --> Node B: funding created
+               let output_script = node_a_chan.context.get_funding_redeemscript();
+               let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: 10000000, script_pubkey: output_script.clone(),
+               }]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+
+               // Node B --> Node A: funding signed
+               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
+
                // Put some inbound and outbound HTLCs in A's channel.
                let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
                node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
@@ -7473,22 +7598,23 @@ mod tests {
                                session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
                                first_hop_htlc_msat: 548,
                                payment_id: PaymentId([42; 32]),
-                       }
+                       },
+                       skimmed_fee_msat: None,
                });
 
                // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
                // the dust limit check.
                let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
-               let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
-               let local_commit_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.opt_anchors());
+               let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+               let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
                assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
 
                // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
                // of the HTLCs are seen to be above the dust limit.
                node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
-               let remote_commit_fee_3_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.opt_anchors());
+               let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
                let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
-               let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+               let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
        }
 
@@ -7506,36 +7632,36 @@ mod tests {
 
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut chan = Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut chan = OutboundV1Channel::<EnforcingSigner>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
 
-               let commitment_tx_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.opt_anchors());
-               let commitment_tx_fee_1_htlc = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.opt_anchors());
+               let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
+               let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
 
                // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped, this HTLC would be
                // counted as dust when it shouldn't be.
-               let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
+               let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
-               let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+               let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
 
                // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
-               let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
+               let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
-               let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+               let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
 
                chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
 
                // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
-               let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
+               let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
-               let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+               let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
 
                // If swapped: this HTLC would be counted as dust when it shouldn't be.
-               let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.opt_anchors()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
+               let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
                let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
-               let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+               let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
                assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
        }
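
The thresholds this test probes come straight from BOLT 3: an HTLC is trimmed (dust) when its value cannot cover its second-stage claim transaction's fee plus the dust limit, with offered HTLCs claimed via HTLC-timeout (663 weight, non-anchor) and received HTLCs via HTLC-success (703 weight). A worked sketch using the test's 253 sat/kW feerate and an illustrative 546 sat dust limit:

    // Non-anchor second-stage transaction weights from BOLT 3.
    const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663; // claims an offered (outbound) HTLC
    const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // claims a received (inbound) HTLC

    /// Dust threshold in msat: second-stage claim fee plus the dust limit.
    fn dust_threshold_msat(feerate_per_kw: u64, claim_tx_weight: u64, dust_limit_sat: u64) -> u64 {
        (feerate_per_kw * claim_tx_weight / 1000 + dust_limit_sat) * 1000
    }

    fn main() {
        let (feerate, dust_limit) = (253, 546);
        // One unit above the timeout threshold is non-dust as an offered HTLC...
        let amt = dust_threshold_msat(feerate, HTLC_TIMEOUT_TX_WEIGHT, dust_limit) + 1000;
        // ...yet still dust against the larger success weight, which is exactly
        // the asymmetry the assertions above would catch if the weights were swapped.
        assert!(amt < dust_threshold_msat(feerate, HTLC_SUCCESS_TX_WEIGHT, dust_limit));
    }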
 
@@ -7555,12 +7681,12 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-               let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+               let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
 
                // Node B --> Node A: accept channel
                let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
@@ -7572,11 +7698,11 @@ mod tests {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
 
                // Node B --> Node A: funding signed
-               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);
+               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
 
                // Now disconnect the two nodes and check that the commitment point in
                // Node B's channel_reestablish message is sane.
@@ -7615,55 +7741,55 @@ mod tests {
                let mut config_101_percent = UserConfig::default();
                config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
 
-               // Test that `new_outbound` creates a channel with the correct value for
+               // Test that `OutboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
                // which is set to the lower bound + 1 (2%) of the `channel_value`.
-               let chan_1 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
+               let chan_1 = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
                let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
                assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
 
                // Test with the upper bound - 1 of valid values (99%).
-               let chan_2 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
+               let chan_2 = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
                let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
                assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
 
                let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash());
 
-               // Test that `new_from_req` creates a channel with the correct value for
+               // Test that `InboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
                // which is set to the lower bound + 1 (2%) of the `channel_value`.
-               let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+               let chan_3 = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
                let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
                assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
 
                // Test with the upper bound - 1 of valid values (99%).
-               let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+               let chan_4 = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
                let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
                assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
 
-               // Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
+               // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
                // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-               let chan_5 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
+               let chan_5 = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
                let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
                assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
 
-               // Test that `new_outbound` uses the upper bound of the configurable percentage values
+               // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
                // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
                // than 100.
-               let chan_6 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
+               let chan_6 = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
                let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
                assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
 
-               // Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
+               // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
                // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-               let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+               let chan_7 = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
                let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
                assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
 
-               // Test that `new_from_req` uses the upper bound of the configurable percentage values
+               // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
                // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
                // than 100.
-               let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+               let chan_8 = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
                let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
                assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
        }
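
In other words, the value under test is just the configured percentage clamped to [1, 100] and applied to the channel value. A compact sketch of that rule (not the exact LDK helper):

    /// `holder_max_htlc_value_in_flight_msat` as exercised above: the configured
    /// percentage, clamped to [1, 100], of the channel value in msat.
    fn max_in_flight_msat(channel_value_sat: u64, configured_percent: u8) -> u64 {
        let pct = configured_percent.clamp(1, 100) as u64;
        channel_value_sat * 1000 * pct / 100
    }

    // max_in_flight_msat(100_000, 0)   == 1_000_000    (clamped up to 1%)
    // max_in_flight_msat(100_000, 101) == 100_000_000  (clamped down to 100%)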
@@ -7671,7 +7797,7 @@ mod tests {
        #[test]
        fn test_configured_holder_selected_channel_reserve_satoshis() {
 
-               // Test that `new_outbound` and `new_from_req` create a channel with the correct
+               // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
                // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
                test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
 
@@ -7703,7 +7829,7 @@ mod tests {
 
                let mut outbound_node_config = UserConfig::default();
                outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
-               let chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
+               let chan = OutboundV1Channel::<EnforcingSigner>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
 
                let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
                assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
@@ -7713,7 +7839,7 @@ mod tests {
                inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
 
                if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
-                       let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+                       let chan_inbound_node = InboundV1Channel::<EnforcingSigner>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
 
                        let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
 
@@ -7721,7 +7847,7 @@ mod tests {
                        assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
                } else {
                        // Channel Negotiations failed
-                       let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+                       let result = InboundV1Channel::<EnforcingSigner>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
                        assert!(result.is_err());
                }
        }
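
The reserve values asserted here reduce to one rule: take the configured proportion of the channel value and floor it at the protocol minimum, and fail negotiation outright when the two sides' reserves would consume the whole channel. A sketch of that computation with hypothetical parameter names:

    /// Channel reserve selection as exercised above: a configured fraction
    /// (in millionths) of the channel value, floored at a minimum reserve.
    fn selected_reserve_sat(channel_value_sat: u64, proportional_millionths: u32,
                            min_reserve_sat: u64) -> u64 {
        core::cmp::max(min_reserve_sat,
            (channel_value_sat as u128 * proportional_millionths as u128 / 1_000_000) as u64)
    }

    // 10_000_000 sat channel at 2% (20_000 millionths) -> 200_000 sat reserve.
    // If both sides' percentages sum to >= 100%, the inbound side rejects the channel.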
@@ -7729,19 +7855,42 @@ mod tests {
        #[test]
        fn channel_update() {
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+               let logger = test_utils::TestLogger::new();
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
                let chain_hash = genesis_block(network).header.block_hash();
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
-               // Create a channel.
+               // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
-               assert!(node_a_chan.context.counterparty_forwarding_info.is_none());
-               assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1); // the default
-               assert!(node_a_chan.context.counterparty_forwarding_info().is_none());
+               let mut node_a_chan = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
+
+               // Create Node B's channel by receiving Node A's open_channel message
+               let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+               let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+               let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+
+               // Node B --> Node A: accept channel, explicitly setting B's dust limit.
+               let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
+               accept_channel_msg.dust_limit_satoshis = 546;
+               node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
+               node_a_chan.context.holder_dust_limit_satoshis = 1560;
+
+               // Node A --> Node B: funding created
+               let output_script = node_a_chan.context.get_funding_redeemscript();
+               let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: 10000000, script_pubkey: output_script.clone(),
+               }]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let (mut node_a_chan, funding_created_msg) = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap();
+               let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
+
+               // Node B --> Node A: funding signed
+               let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger).unwrap();
 
                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
@@ -7817,7 +7966,7 @@ mod tests {
                let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let mut config = UserConfig::default();
                config.channel_handshake_config.announced_channel = false;
-               let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+               let mut chan = OutboundV1Channel::<InMemorySigner>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
                chan.context.holder_dust_limit_satoshis = 546;
                chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
 
@@ -7858,15 +8007,15 @@ mod tests {
 
                macro_rules! test_commitment {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
-                               chan.context.channel_transaction_parameters.opt_anchors = None;
-                               test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, false, $($remain)*);
+                               chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+                               test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
                        };
                }
 
                macro_rules! test_commitment_with_anchors {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
-                               chan.context.channel_transaction_parameters.opt_anchors = Some(());
-                               test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, true, $($remain)*);
+                               chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
+                               test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
                        };
                }
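
These macros reflect the wider migration in this change: `ChannelTransactionParameters` no longer carries a bare `opt_anchors: Option<()>` but a full `ChannelTypeFeatures` set. A hypothetical sketch of how the legacy flag maps onto the new type, using the two constructors the macros above rely on:

    use lightning::ln::features::ChannelTypeFeatures;

    /// Map the legacy anchors flag onto the feature-set representation. Purely
    /// illustrative; the real deserializer overrides the field from the
    /// channel's own `channel_type` instead.
    fn features_from_legacy(opt_anchors: Option<()>) -> ChannelTypeFeatures {
        if opt_anchors.is_some() {
            ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()
        } else {
            ChannelTypeFeatures::only_static_remote_key()
        }
    }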
 
@@ -7925,9 +8074,9 @@ mod tests {
                                        let ref htlc = htlcs[$htlc_idx];
                                        let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
                                                chan.context.get_counterparty_selected_contest_delay().unwrap(),
-                                               &htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+                                               &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
-                                       let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+                                       let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                                        let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
                                        assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
 
@@ -7944,7 +8093,7 @@ mod tests {
                                        }
 
                                        let htlc_sig = htlc_sig_iter.next().unwrap();
-                                       let num_anchors = if $opt_anchors { 2 } else { 0 };
+                                       let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
                                        assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
 
                                        let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
@@ -8007,6 +8156,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
                        out
@@ -8019,6 +8169,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
                        out
@@ -8263,6 +8414,8 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 2185;
                chan.context.holder_dust_limit_satoshis = 2001;
+               let cached_channel_type = chan.context.channel_type;
+               chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 
                test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
                                 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
@@ -8283,6 +8436,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 3702;
                chan.context.holder_dust_limit_satoshis = 546;
+               chan.context.channel_type = cached_channel_type.clone();
 
                test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
                                 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
@@ -8317,6 +8471,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 3687;
                chan.context.holder_dust_limit_satoshis = 3001;
+               chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 
                test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
                                 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
@@ -8332,6 +8487,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 4914;
                chan.context.holder_dust_limit_satoshis = 546;
+               chan.context.channel_type = cached_channel_type.clone();
 
                test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
                                 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
@@ -8356,6 +8512,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 4894;
                chan.context.holder_dust_limit_satoshis = 4001;
+               chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 
                test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
                                 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
@@ -8365,6 +8522,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 9651180;
                chan.context.holder_dust_limit_satoshis = 546;
+               chan.context.channel_type = cached_channel_type.clone();
 
                test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
                                 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
@@ -8382,6 +8540,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 6216010;
                chan.context.holder_dust_limit_satoshis = 4001;
+               chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
 
                test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
                                 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
@@ -8391,6 +8550,7 @@ mod tests {
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 9651936;
                chan.context.holder_dust_limit_satoshis = 546;
+               chan.context.channel_type = cached_channel_type;
 
                test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
                                 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
@@ -8420,6 +8580,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
                        out
@@ -8432,6 +8593,7 @@ mod tests {
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
+                               skimmed_fee_msat: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
                        out
@@ -8455,6 +8617,7 @@ mod tests {
                                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
                } );
 
+               chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
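The helper `anchors_zero_htlc_fee_and_dependencies()` used here and above appears to build a channel type with the anchors bit plus its `static_remote_key` dependency. Assuming the `lightning` crate's public feature setters behave as their use elsewhere in this diff suggests, roughly the same type can be constructed by hand:

    use lightning::ln::features::ChannelTypeFeatures;

    fn main() {
        // Assumption: this mirrors what the test helper produces.
        let mut ct = ChannelTypeFeatures::empty();
        ct.set_static_remote_key_required(); // dependency of the anchors feature
        ct.set_anchors_zero_fee_htlc_tx_required();
        assert!(ct.supports_anchors_zero_fee_htlc_tx());
    }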
                test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
                                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
@@ -8536,7 +8699,7 @@ mod tests {
 
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, &&keys_provider,
+               let node_a_chan = OutboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider,
                        node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
 
                let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
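From this hunk onward, `Channel::<_>::new_outbound` and `Channel::<_>::new_from_req` become `OutboundV1Channel::<_>::new` and `InboundV1Channel::<_>::new`; the argument lists are unchanged, only the unfunded-channel phases now live in their own types. A toy sketch of that split (illustrative types, not LDK's actual definitions):

    struct ChannelContext; // state shared by all channel phases

    /// An outbound channel before funding; previously built via `Channel::new_outbound`.
    struct OutboundV1Channel { context: ChannelContext }

    /// An inbound channel built from a peer's `open_channel`; previously `new_from_req`.
    struct InboundV1Channel { context: ChannelContext }

    impl OutboundV1Channel {
        fn new() -> Self { Self { context: ChannelContext } }
    }

    impl InboundV1Channel {
        fn new() -> Self { Self { context: ChannelContext } }
    }

    fn main() {
        let _a = OutboundV1Channel::new();
        let _b = InboundV1Channel::new();
    }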
@@ -8545,13 +8708,12 @@ mod tests {
                let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
                open_channel_msg.channel_type = Some(channel_type_features);
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
-               let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
+               let res = InboundV1Channel::<EnforcingSigner>::new(&feeest, &&keys_provider, &&keys_provider,
                        node_b_node_id, &channelmanager::provided_channel_type_features(&config),
                        &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
                assert!(res.is_ok());
        }
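The `#[cfg(anchors)]` deletions in the hunks that follow mean anchors support is no longer behind an opt-in build flag; these tests now compile in every configuration. A minimal illustration of the kind of gate being removed (the cfg name below is hypothetical):

    // Hypothetical gate name, for illustration only.
    #[cfg(some_feature_gate)]
    #[test]
    fn only_built_when_the_gate_is_set() {}

    #[test]
    fn always_built() {} // the anchors tests now fall in this category

    fn main() {} // allows compiling the sketch outside a test harness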
 
-       #[cfg(anchors)]
        #[test]
        fn test_supports_anchors_zero_htlc_tx_fee() {
                // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
@@ -8570,7 +8732,7 @@ mod tests {
 
                // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
                // need to signal it.
-               let channel_a = Channel::<EnforcingSigner>::new_outbound(
+               let channel_a = OutboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
                        &config, 0, 42
@@ -8581,13 +8743,13 @@ mod tests {
                expected_channel_type.set_static_remote_key_required();
                expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
 
-               let channel_a = Channel::<EnforcingSigner>::new_outbound(
+               let channel_a = OutboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
                ).unwrap();
 
                let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
-               let channel_b = Channel::<EnforcingSigner>::new_from_req(
+               let channel_b = InboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
                        &open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8597,7 +8759,6 @@ mod tests {
                assert_eq!(channel_b.context.channel_type, expected_channel_type);
        }
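The test above pins down the negotiation rule: the anchors channel type is only selected when both peers signal `option_anchors_zero_fee_htlc_tx`; the initiator alone is not enough. A toy sketch of that rule (types here are illustrative, not LDK's negotiation code):

    /// Both sides must signal support for the feature to be negotiated.
    fn negotiated_anchors(ours: bool, theirs: bool) -> bool {
        ours && theirs
    }

    fn main() {
        assert!(!negotiated_anchors(true, false)); // initiator alone isn't enough
        assert!(negotiated_anchors(true, true));
    }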
 
-       #[cfg(anchors)]
        #[test]
        fn test_rejects_implicit_simple_anchors() {
                // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
@@ -8619,7 +8780,7 @@ mod tests {
                let raw_init_features = static_remote_key_required | simple_anchors_required;
                let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
 
-               let channel_a = Channel::<EnforcingSigner>::new_outbound(
+               let channel_a = OutboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
                ).unwrap();
@@ -8630,7 +8791,7 @@ mod tests {
 
                // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
                // `static_remote_key`, it will fail the channel.
-               let channel_b = Channel::<EnforcingSigner>::new_from_req(
+               let channel_b = InboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
                        &open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8638,7 +8799,6 @@ mod tests {
                assert!(channel_b.is_err());
        }
 
-       #[cfg(anchors)]
        #[test]
        fn test_rejects_simple_anchors_channel_type() {
                // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
@@ -8660,13 +8820,13 @@ mod tests {
                let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
                let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
                let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
-               assert!(simple_anchors_init.requires_unknown_bits());
-               assert!(simple_anchors_channel_type.requires_unknown_bits());
+               assert!(!simple_anchors_init.requires_unknown_bits());
+               assert!(!simple_anchors_channel_type.requires_unknown_bits());
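These assertions flip because LDK now recognizes the `option_anchors` feature bits, so they no longer count as unknown, even though LDK still refuses to open channels of that type. A toy model of the distinction between "known" and "supported" (bit positions per BOLT 9: `static_remote_key` required is bit 12, `option_anchors` required is bit 20; the real positions are defined earlier in this test, outside this hunk):

    /// Toy model: a feature bit can be known to an implementation without being
    /// supported for new channels; `requires_unknown_bits()` only checks the former.
    struct Features { known_required_bits: u64, raw: u64 }

    impl Features {
        fn requires_unknown_bits(&self) -> bool {
            self.raw & !self.known_required_bits != 0
        }
    }

    fn main() {
        let known: u64 = (1 << 12) | (1 << 20); // static_remote_key + option_anchors, now both known
        let simple_anchors = Features { known_required_bits: known, raw: known };
        assert!(!simple_anchors.requires_unknown_bits()); // known, even though unsupported
    }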
 
                // First, we'll try to open a channel between A and B where A requests a channel type for
                // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
                // B as it's not supported by LDK.
-               let channel_a = Channel::<EnforcingSigner>::new_outbound(
+               let channel_a = OutboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
                ).unwrap();
@@ -8674,7 +8834,7 @@ mod tests {
                let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
                open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
 
-               let res = Channel::<EnforcingSigner>::new_from_req(
+               let res = InboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
                        &open_channel_msg, 7, &config, 0, &&logger, 42
@@ -8685,14 +8845,14 @@ mod tests {
                // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
                // original `option_anchors` feature, which should be rejected by A as it's not supported by
                // LDK.
-               let mut channel_a = Channel::<EnforcingSigner>::new_outbound(
+               let mut channel_a = OutboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
                        10000000, 100000, 42, &config, 0, 42
                ).unwrap();
 
                let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
 
-               let channel_b = Channel::<EnforcingSigner>::new_from_req(
+               let channel_b = InboundV1Channel::<EnforcingSigner>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                        &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
                        &open_channel_msg, 7, &config, 0, &&logger, 42