Merge pull request #3125 from valentinewallace/2024-06-async-payments-prefactor
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 58d35a1a95a4ddfc05b29d7fcfe4db5bdf6e89a7..f5c47260f6ef68842a0ff1c0d03e2935aa4a7560 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -7,6 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use bitcoin::amount::Amount;
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
 use bitcoin::blockdata::transaction::Transaction;
@@ -24,12 +25,13 @@ use bitcoin::secp256k1::{PublicKey,SecretKey};
 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
 use bitcoin::secp256k1;
 
-use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
+use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash};
 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
+use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
+use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -37,7 +39,7 @@ use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
+use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
 use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
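The import changes above reflect two relocations: the basic wire types (ChannelId, PaymentHash, PaymentPreimage) now come from ln::types, while the HTLC-details/state-details types, CounterpartyForwardingInfo, and ChannelShutdownState come from the new ln::channel_state module rather than being defined in this file or re-exported from channelmanager. A sketch of the paths code outside this module would use under this layout (module paths assumed from the imports above):

use lightning::ln::types::{ChannelId, PaymentHash, PaymentPreimage};
use lightning::ln::channel_state::{
    ChannelShutdownState, CounterpartyForwardingInfo,
    InboundHTLCDetails, InboundHTLCStateDetails,
    OutboundHTLCDetails, OutboundHTLCStateDetails,
};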
@@ -185,45 +187,6 @@ enum InboundHTLCState {
        LocalRemoved(InboundHTLCRemovalReason),
 }
 
-/// Exposes the state of pending inbound HTLCs.
-///
-/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
-/// through the following states in the state machine:
-/// - Announced for addition by the originating node through the update_add_htlc message.
-/// - Added to the commitment transaction of the receiving node and originating node in turn
-///   through the exchange of commitment_signed and revoke_and_ack messages.
-/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
-///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
-/// - Removed from the commitment transaction of the originating node and receiving node in turn
-///   through the exchange of commitment_signed and revoke_and_ack messages.
-///
-/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
-#[derive(Clone, Debug, PartialEq)]
-pub enum InboundHTLCStateDetails {
-       /// We have added this HTLC in our commitment transaction by receiving commitment_signed and
-       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
-       /// before this HTLC is included on the remote commitment transaction.
-       AwaitingRemoteRevokeToAdd,
-       /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
-       /// and is included in both commitment transactions.
-       ///
-       /// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
-       /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
-       /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
-       /// payment, it will only be claimed together with other required parts.
-       Committed,
-       /// We have received the preimage for this HTLC and it is being removed by fulfilling it with
-       /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
-       /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
-       /// commitment transaction after update_fulfill_htlc.
-       AwaitingRemoteRevokeToRemoveFulfill,
-       /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
-       /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
-       /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
-       /// transaction.
-       AwaitingRemoteRevokeToRemoveFail,
-}
-
 impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
        fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
                match state {
@@ -244,13 +207,6 @@ impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
        }
 }
 
-impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
-       (0, AwaitingRemoteRevokeToAdd) => {},
-       (2, Committed) => {},
-       (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
-       (6, AwaitingRemoteRevokeToRemoveFail) => {};
-);
-
 struct InboundHTLCOutput {
        htlc_id: u64,
        amount_msat: u64,
@@ -259,53 +215,6 @@ struct InboundHTLCOutput {
        state: InboundHTLCState,
 }
 
-/// Exposes details around pending inbound HTLCs.
-#[derive(Clone, Debug, PartialEq)]
-pub struct InboundHTLCDetails {
-       /// The HTLC ID.
-       /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
-       /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
-       /// and not part of any commitment transaction.
-       pub htlc_id: u64,
-       /// The amount in msat.
-       pub amount_msat: u64,
-       /// The block height at which this HTLC expires.
-       pub cltv_expiry: u32,
-       /// The payment hash.
-       pub payment_hash: PaymentHash,
-       /// The state of the HTLC in the state machine.
-       ///
-       /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
-       /// waiting for to advance to the next state.
-       ///
-       /// See [`InboundHTLCStateDetails`] for information on the specific states.
-       ///
-       /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
-       /// states may result in `None` here.
-       pub state: Option<InboundHTLCStateDetails>,
-       /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
-       /// from the local commitment transaction and added to the commitment transaction fee.
-       /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
-       /// transactions as well.
-       ///
-       /// When the local commitment transaction is broadcasted as part of a unilateral closure,
-       /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
-       /// fee.
-       ///
-       /// Note that dust limits are specific to each party. An HTLC can be dust for the local
-       /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
-       pub is_dust: bool,
-}
-
-impl_writeable_tlv_based!(InboundHTLCDetails, {
-       (0, htlc_id, required),
-       (2, amount_msat, required),
-       (4, cltv_expiry, required),
-       (6, payment_hash, required),
-       (7, state, upgradable_option),
-       (8, is_dust, required),
-});
-
 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum OutboundHTLCState {
        /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
@@ -339,42 +248,6 @@ enum OutboundHTLCState {
        AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
 }
 
-/// Exposes the state of pending outbound HTLCs.
-///
-/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
-/// through the following states in the state machine:
-/// - Announced for addition by the originating node through the update_add_htlc message.
-/// - Added to the commitment transaction of the receiving node and originating node in turn
-///   through the exchange of commitment_signed and revoke_and_ack messages.
-/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
-///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
-/// - Removed from the commitment transaction of the originating node and receiving node in turn
-///   through the exchange of commitment_signed and revoke_and_ack messages.
-///
-/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
-#[derive(Clone, Debug, PartialEq)]
-pub enum OutboundHTLCStateDetails {
-       /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
-       /// on the remote's commitment transaction after update_add_htlc.
-       AwaitingRemoteRevokeToAdd,
-       /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
-       /// and receiving revoke_and_ack in return.
-       ///
-       /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
-       /// unilaterally close the channel due to a timeout with an uncooperative remote node.
-       Committed,
-       /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
-       /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
-       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
-       /// for the removal from its commitment transaction.
-       AwaitingRemoteRevokeToRemoveSuccess,
-       /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
-       /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
-       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
-       /// for the removal from its commitment transaction.
-       AwaitingRemoteRevokeToRemoveFailure,
-}
-
 impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
        fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
                match state {
@@ -398,13 +271,6 @@ impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
        }
 }
 
-impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
-       (0, AwaitingRemoteRevokeToAdd) => {},
-       (2, Committed) => {},
-       (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
-       (6, AwaitingRemoteRevokeToRemoveFailure) => {};
-);
-
 #[derive(Clone)]
 #[cfg_attr(test, derive(Debug, PartialEq))]
 enum OutboundHTLCOutcome {
@@ -443,58 +309,6 @@ struct OutboundHTLCOutput {
        skimmed_fee_msat: Option<u64>,
 }
 
-/// Exposes details around pending outbound HTLCs.
-#[derive(Clone, Debug, PartialEq)]
-pub struct OutboundHTLCDetails {
-       /// The HTLC ID.
-       /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
-       /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
-       /// and not part of any commitment transaction.
-       ///
-       /// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
-       pub htlc_id: Option<u64>,
-       /// The amount in msat.
-       pub amount_msat: u64,
-       /// The block height at which this HTLC expires.
-       pub cltv_expiry: u32,
-       /// The payment hash.
-       pub payment_hash: PaymentHash,
-       /// The state of the HTLC in the state machine.
-       ///
-       /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
-       /// waiting for to advance to the next state.
-       ///
-       /// See [`OutboundHTLCStateDetails`] for information on the specific states.
-       ///
-       /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
-       /// states may result in `None` here.
-       pub state: Option<OutboundHTLCStateDetails>,
-       /// The extra fee being skimmed off the top of this HTLC.
-       pub skimmed_fee_msat: Option<u64>,
-       /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
-       /// from the local commitment transaction and added to the commitment transaction fee.
-       /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
-       /// transactions as well.
-       ///
-       /// When the local commitment transaction is broadcasted as part of a unilateral closure,
-       /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
-       /// fee.
-       ///
-       /// Note that dust limits are specific to each party. An HTLC can be dust for the local
-       /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
-       pub is_dust: bool,
-}
-
-impl_writeable_tlv_based!(OutboundHTLCDetails, {
-       (0, htlc_id, required),
-       (2, amount_msat, required),
-       (4, cltv_expiry, required),
-       (6, payment_hash, required),
-       (7, state, upgradable_option),
-       (8, skimmed_fee_msat, required),
-       (10, is_dust, required),
-});
-
 /// See AwaitingRemoteRevoke ChannelState for more info
 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum HTLCUpdateAwaitingACK {
@@ -896,7 +710,7 @@ pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
 pub(super) enum ChannelError {
        Ignore(String),
        Warn(String),
-       Close(String),
+       Close((String, ClosureReason)),
 }
 
 impl fmt::Debug for ChannelError {
@@ -904,7 +718,7 @@ impl fmt::Debug for ChannelError {
                match self {
                        &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
                        &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
-                       &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+                       &ChannelError::Close((ref e, _)) => write!(f, "Close : {}", e),
                }
        }
 }
@@ -914,34 +728,43 @@ impl fmt::Display for ChannelError {
                match self {
                        &ChannelError::Ignore(ref e) => write!(f, "{}", e),
                        &ChannelError::Warn(ref e) => write!(f, "{}", e),
-                       &ChannelError::Close(ref e) => write!(f, "{}", e),
+                       &ChannelError::Close((ref e, _)) => write!(f, "{}", e),
                }
        }
 }
 
+impl ChannelError {
+       pub(super) fn close(err: String) -> Self {
+               ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err }))
+       }
+}
+
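ChannelError::Close now carries a ClosureReason alongside the message, and the close() constructor above keeps the two in sync for the common ProcessingError case. A minimal, hypothetical call-site sketch:

fn example_check(funding_satoshis: u64) -> Result<(), ChannelError> {
    if funding_satoshis == 0 {
        // The helper builds Close((msg, ClosureReason::ProcessingError { err: msg })).
        return Err(ChannelError::close("Funding amount must be non-zero".to_owned()));
    }
    Ok(())
}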
 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
        pub logger: &'a L,
        pub peer_id: Option<PublicKey>,
        pub channel_id: Option<ChannelId>,
+       pub payment_hash: Option<PaymentHash>,
 }
 
 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
        fn log(&self, mut record: Record) {
                record.peer_id = self.peer_id;
                record.channel_id = self.channel_id;
+               record.payment_hash = self.payment_hash;
                self.logger.log(record)
        }
 }
 
 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
 where L::Target: Logger {
-       pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
+       pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>, payment_hash: Option<PaymentHash>) -> Self
        where S::Target: SignerProvider
        {
                WithChannelContext {
                        logger,
                        peer_id: Some(context.counterparty_node_id),
                        channel_id: Some(context.channel_id),
+                       payment_hash
                }
        }
 }
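With the extra parameter, HTLC-handling paths can tag every log line for a given HTLC with its payment hash. Hypothetical call-site sketch (self.logger, chan, and htlc are assumed to be in scope):

let logger = WithChannelContext::from(&self.logger, &chan.context, Some(htlc.payment_hash));
log_trace!(logger, "Handling update for HTLC {}", htlc.htlc_id);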
@@ -950,7 +773,7 @@ macro_rules! secp_check {
        ($res: expr, $err: expr) => {
                match $res {
                        Ok(thing) => thing,
-                       Err(_) => return Err(ChannelError::Close($err)),
+                       Err(_) => return Err(ChannelError::close($err)),
                }
        };
 }
@@ -998,14 +821,16 @@ enum HTLCInitiator {
        RemoteOffered,
 }
 
-/// An enum gathering stats on pending HTLCs, either inbound or outbound side.
+/// Current counts of various HTLCs, useful for calculating current balances available exactly.
 struct HTLCStats {
-       pending_htlcs: u32,
-       pending_htlcs_value_msat: u64,
+       pending_inbound_htlcs: usize,
+       pending_outbound_htlcs: usize,
+       pending_inbound_htlcs_value_msat: u64,
+       pending_outbound_htlcs_value_msat: u64,
        on_counterparty_tx_dust_exposure_msat: u64,
        on_holder_tx_dust_exposure_msat: u64,
-       holding_cell_msat: u64,
-       on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
+       outbound_holding_cell_msat: u64,
+       on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
 }
 
 /// An enum gathering stats on commitment transaction, either local or remote.
@@ -1114,6 +939,75 @@ pub(crate) struct ShutdownResult {
        pub(crate) channel_funding_txo: Option<OutPoint>,
 }
 
+/// Tracks the transaction number, along with current and next commitment points.
+/// This consolidates the logic to advance our commitment number and request new
+/// commitment points from our signer.
+#[derive(Debug, Copy, Clone)]
+enum HolderCommitmentPoint {
+       // TODO: add a variant for before our first commitment point is retrieved
+       /// We've advanced our commitment number and are waiting on the next commitment point.
+       /// Until the `get_per_commitment_point` signer method becomes async, this variant
+       /// will not be used.
+       PendingNext { transaction_number: u64, current: PublicKey },
+       /// Our current commitment point is ready, we've cached our next point,
+       /// and we are not pending a new one.
+       Available { transaction_number: u64, current: PublicKey, next: PublicKey },
+}
+
+impl HolderCommitmentPoint {
+       pub fn new<SP: Deref>(signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>) -> Self
+               where SP::Target: SignerProvider
+       {
+               HolderCommitmentPoint::Available {
+                       transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx),
+                       next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx),
+               }
+       }
+
+       pub fn is_available(&self) -> bool {
+               if let HolderCommitmentPoint::Available { .. } = self { true } else { false }
+       }
+
+       pub fn transaction_number(&self) -> u64 {
+               match self {
+                       HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number,
+                       HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number,
+               }
+       }
+
+       pub fn current_point(&self) -> PublicKey {
+               match self {
+                       HolderCommitmentPoint::PendingNext { current, .. } => *current,
+                       HolderCommitmentPoint::Available { current, .. } => *current,
+               }
+       }
+
+       pub fn next_point(&self) -> Option<PublicKey> {
+               match self {
+                       HolderCommitmentPoint::PendingNext { .. } => None,
+                       HolderCommitmentPoint::Available { next, .. } => Some(*next),
+               }
+       }
+
+       pub fn advance<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
+               where SP::Target: SignerProvider, L::Target: Logger
+       {
+               if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self {
+                       *self = HolderCommitmentPoint::PendingNext {
+                               transaction_number: *transaction_number - 1,
+                               current: *next,
+                       };
+               }
+
+               if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
+                       let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx);
+                       log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1);
+                       *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
+               }
+       }
+}
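A usage sketch for the state machine above (holder_signer, secp_ctx, and logger assumed in scope): with a synchronous signer, advance() passes through PendingNext and immediately lands back in Available one commitment number lower, with the following point re-cached.

let mut point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
assert!(point.is_available());
assert_eq!(point.transaction_number(), INITIAL_COMMITMENT_NUMBER);
point.advance(&holder_signer, &secp_ctx, &logger);
assert_eq!(point.transaction_number(), INITIAL_COMMITMENT_NUMBER - 1);
assert!(point.next_point().is_some());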
+
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
@@ -1189,9 +1083,9 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
        UnfundedOutboundV1(OutboundV1Channel<SP>),
        UnfundedInboundV1(InboundV1Channel<SP>),
-       #[cfg(dual_funding)]
+       #[cfg(any(dual_funding, splicing))]
        UnfundedOutboundV2(OutboundV2Channel<SP>),
-       #[cfg(dual_funding)]
+       #[cfg(any(dual_funding, splicing))]
        UnfundedInboundV2(InboundV2Channel<SP>),
        Funded(Channel<SP>),
 }
@@ -1205,9 +1099,9 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
                        ChannelPhase::Funded(chan) => &chan.context,
                        ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
                        ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
                }
        }
@@ -1217,9 +1111,9 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
                        ChannelPhase::Funded(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
                }
        }
@@ -1292,7 +1186,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        // generation start at 0 and count up...this simplifies some parts of implementation at the
        // cost of others, but should really just be changed.
 
-       cur_holder_commitment_transaction_number: u64,
+       holder_commitment_point: HolderCommitmentPoint,
        cur_counterparty_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
@@ -1398,7 +1292,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        /// Either the height at which this channel was created or the height at which it was last
        /// serialized if it was serialized by versions prior to 0.0.103.
        /// We use this to close if funding is never broadcasted.
-       channel_creation_height: u32,
+       pub(super) channel_creation_height: u32,
 
        counterparty_dust_limit_satoshis: u64,
 
@@ -1569,7 +1463,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        L::Target: Logger,
                        SP::Target: SignerProvider,
        {
-               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
+               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None);
                let announced_channel = if (open_channel_fields.channel_flags & 1) == 1 { true } else { false };
 
                let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
@@ -1579,90 +1473,90 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let pubkeys = holder_signer.pubkeys().clone();
 
                if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+                       return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
                }
 
                // Check sanity of message fields:
                if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
                                config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
                                open_channel_fields.funding_satoshis, our_funding_satoshis)));
                }
                if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
                }
                if msg_channel_reserve_satoshis > channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
                }
                let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
                if msg_push_msat > full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
+                       return Err(ChannelError::close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
                }
                if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
                }
                if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
+                       return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
                }
                Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
 
                let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
                if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
+                       return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
                }
                if open_channel_fields.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+                       return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
                }
                if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
+                       return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
                }
 
                // Now check against optional parameters as set by config...
                if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
-                       return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+                       return Err(ChannelError::close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
                }
                if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+                       return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
                }
                if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+                       return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
                }
                if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+                       return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
                }
                if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+                       return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
                }
                if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if open_channel_fields.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
 
                // Convert things into internal flags and prep our state:
 
                if config.channel_handshake_limits.force_announced_channel_preference {
                        if config.channel_handshake_config.announced_channel != announced_channel {
-                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+                               return Err(ChannelError::close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
                        }
                }
 
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        // Protocol level safety check in place, although it should never happen because
                        // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
+                       return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
                }
                if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
                                msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
                }
                if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+                       return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
                }
 
                // check if the funder's amount for the initial commitment tx is sufficient
@@ -1675,14 +1569,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
                let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
                if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
-                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
+                       return Err(ChannelError::close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
                }
 
                let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
                // While it's reasonable for us to not meet the channel reserve initially (if they don't
                // want to push much to us), our counterparty should always have more than our reserve.
                if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
+                       return Err(ChannelError::close("Insufficient funding amount for initial reserve".to_owned()));
                }
 
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@ -1693,14 +1587,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                                None
                                        } else {
                                                if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+                                                       return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
                                                }
                                                Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
                                &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+                                       return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
                } else { None };
@@ -1708,19 +1602,19 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
                        match signer_provider.get_shutdown_scriptpubkey() {
                                Ok(scriptpubkey) => Some(scriptpubkey),
-                               Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+                               Err(_) => return Err(ChannelError::close("Failed to get upfront shutdown scriptpubkey".to_owned())),
                        }
                } else { None };
 
                if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
                        if !shutdown_scriptpubkey.is_compatible(&their_features) {
-                               return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+                               return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
                        }
                }
 
                let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
                        Ok(script) => script,
-                       Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+                       Err(_) => return Err(ChannelError::close("Failed to get destination script".to_owned())),
                };
 
                let mut secp_ctx = Secp256k1::new();
@@ -1734,6 +1628,9 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
 
+               let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+               let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
                // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
 
                let channel_context = ChannelContext {
@@ -1759,11 +1656,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                        latest_monitor_update_id: 0,
 
-                       holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+                       holder_signer,
                        shutdown_scriptpubkey,
                        destination_script,
 
-                       cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       holder_commitment_point,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat,
 
@@ -1958,6 +1855,9 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
 
+               let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+               let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
                Ok(Self {
                        user_id,
 
@@ -1981,11 +1881,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                        latest_monitor_update_id: 0,
 
-                       holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+                       holder_signer,
                        shutdown_scriptpubkey,
                        destination_script,
 
-                       cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       holder_commitment_point,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat,
 
@@ -2218,8 +2118,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns the holder signer for this channel.
        #[cfg(test)]
-       pub fn get_signer(&self) -> &ChannelSignerType<SP> {
-               return &self.holder_signer
+       pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+               return &mut self.holder_signer
        }
 
        /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
@@ -2246,6 +2146,143 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                }
        }
 
+       /// Performs checks against necessary constraints after receiving either an `accept_channel` or
+       /// `accept_channel2` message.
+       pub fn do_accept_channel_checks(
+               &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+               common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+       ) -> Result<(), ChannelError> {
+               let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
+               // Check sanity of message fields:
+               if !self.is_outbound() {
+                       return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+               }
+               if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+                       return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+               }
+               if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+                       return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+               }
+               if channel_reserve_satoshis > self.channel_value_satoshis {
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+               }
+               if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+               }
+               if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+                               channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+               }
+               let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+               if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+               }
+               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if common_fields.to_self_delay > max_delay_acceptable {
+                       return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+               }
+               if common_fields.max_accepted_htlcs < 1 {
+                       return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+               }
+               if common_fields.max_accepted_htlcs > MAX_HTLCS {
+                       return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+               }
+
+               // Now check against optional parameters as set by config...
+               if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+                       return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+               }
+               if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+               }
+               if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+               }
+               if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+                       return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+               }
+               if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+                       return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+               }
+
+               if let Some(ty) = &common_fields.channel_type {
+                       if *ty != self.channel_type {
+                               return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+                       }
+               } else if their_features.supports_channel_type() {
+                       // Assume they've accepted the channel type as they said they understand it.
+               } else {
+                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
+                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                               return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       }
+                       self.channel_type = channel_type.clone();
+                       self.channel_transaction_parameters.channel_type_features = channel_type;
+               }
+
+               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+                       match &common_fields.shutdown_scriptpubkey {
+                               &Some(ref script) => {
+                                       // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
+                                       if script.len() == 0 {
+                                               None
+                                       } else {
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+                                               }
+                                               Some(script.clone())
+                                       }
+                               },
+                               // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
+                               &None => {
+                                       return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+                               }
+                       }
+               } else { None };
+
+               self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+               self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+               self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+               self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+               self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+               if peer_limits.trust_own_funding_0conf {
+                       self.minimum_depth = Some(common_fields.minimum_depth);
+               } else {
+                       self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+               }
+
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: common_fields.funding_pubkey,
+                       revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+                       payment_point: common_fields.payment_basepoint,
+                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+                       htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+               };
+
+               self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+                       selected_contest_delay: common_fields.to_self_delay,
+                       pubkeys: counterparty_pubkeys,
+               });
+
+               self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+               self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+               self.channel_state = ChannelState::NegotiatingFunding(
+                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+               );
+               self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+               Ok(())
+       }
+
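
The upfront-shutdown handling accepted above follows a simple rule. As a hedged, standalone restatement (the function name, parameters, and the `is_bolt2_compliant` closure below are illustrative stand-ins, not the crate's API):

```rust
// Minimal sketch: if the peer signals upfront_shutdown_script, a 0-length script opts out,
// a missing script is a protocol violation, and any other script must pass the BOLT 2
// format check before we store it as the counterparty's shutdown scriptpubkey.
fn accept_upfront_shutdown_script(
    peer_supports_upfront_shutdown: bool,
    script: Option<&[u8]>,
    is_bolt2_compliant: impl Fn(&[u8]) -> bool,
) -> Result<Option<Vec<u8>>, String> {
    if !peer_supports_upfront_shutdown {
        return Ok(None);
    }
    match script {
        Some(s) if s.is_empty() => Ok(None), // explicit opt-out
        Some(s) if is_bolt2_compliant(s) => Ok(Some(s.to_vec())),
        Some(_) => Err("unacceptable upfront shutdown scriptpubkey format".to_owned()),
        None => Err("upfront_shutdown signaled without a script; a 0-length script opts out".to_owned()),
    }
}
```
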
        /// Returns the block hash in which our funding transaction was confirmed.
        pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
                self.funding_tx_confirmed_in
@@ -2337,15 +2374,16 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
        }
 
-       pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
-               fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
-       where F::Target: FeeEstimator
-       {
+       fn get_dust_exposure_limiting_feerate<F: Deref>(&self,
+               fee_estimator: &LowerBoundedFeeEstimator<F>,
+       ) -> u32 where F::Target: FeeEstimator {
+               fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep)
+       }
+
+       pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 {
                match self.config.options.max_dust_htlc_exposure {
                        MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
-                               let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
-                                       ConfirmationTarget::OnChainSweep) as u64;
-                               feerate_per_kw.saturating_mul(multiplier)
+                               (limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier)
                        },
                        MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
                }
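
To make the refactor concrete: the exposure cap now takes the pre-computed dust-exposure-limiting feerate instead of a fee estimator. A standalone restatement (the enum below mirrors the config option but is redeclared here purely for the example):

```rust
enum MaxDustExposure {
    FeeRateMultiplier(u64),
    FixedLimitMsat(u64),
}

fn max_dust_htlc_exposure_msat(cfg: &MaxDustExposure, limiting_feerate_sat_per_kw: u32) -> u64 {
    match cfg {
        // e.g. a multiplier of 10_000 at a limiting feerate of 2_530 sat/KW caps exposure at 25_300_000 msat.
        MaxDustExposure::FeeRateMultiplier(m) => (limiting_feerate_sat_per_kw as u64).saturating_mul(*m),
        MaxDustExposure::FixedLimitMsat(limit) => *limit,
    }
}
```
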
@@ -2678,8 +2716,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// our counterparty!)
        /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
        /// TODO Some magic rust shit to compile-time check this?
-       fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
-               let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+       fn build_holder_transaction_keys(&self) -> TxCreationKeys {
+               let per_commitment_point = self.holder_commitment_point.current_point();
                let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
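
A simplified sketch of the signer round-trip this change removes: a cached holder commitment point (the struct below is hypothetical and generic over the point type) remembers the current point and its transaction number, and is advanced when a commitment is exchanged rather than re-derived from the signer on every key build.

```rust
struct CachedCommitmentPoint<P> {
    transaction_number: u64,
    current: P,
}

impl<P: Clone> CachedCommitmentPoint<P> {
    fn current_point(&self) -> P { self.current.clone() }
    fn transaction_number(&self) -> u64 { self.transaction_number }
    fn advance(&mut self, next: P) {
        // Commitment numbers count down over the life of the channel, so advancing decrements.
        self.transaction_number -= 1;
        self.current = next;
    }
}
```
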
@@ -2738,86 +2776,111 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                self.counterparty_forwarding_info.clone()
        }
 
-       /// Returns a HTLCStats about inbound pending htlcs
-       fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+       /// Returns an HTLCStats about pending HTLCs
+       fn get_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>, dust_exposure_limiting_feerate: u32) -> HTLCStats {
                let context = self;
-               let mut stats = HTLCStats {
-                       pending_htlcs: context.pending_inbound_htlcs.len() as u32,
-                       pending_htlcs_value_msat: 0,
-                       on_counterparty_tx_dust_exposure_msat: 0,
-                       on_holder_tx_dust_exposure_msat: 0,
-                       holding_cell_msat: 0,
-                       on_holder_tx_holding_cell_htlcs_count: 0,
-               };
+               let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx();
 
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+               let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update);
+               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors {
                        (0, 0)
                } else {
-                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+                       (dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+                               dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000)
                };
-               let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
-               let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
-               for ref htlc in context.pending_inbound_htlcs.iter() {
-                       stats.pending_htlcs_value_msat += htlc.amount_msat;
-                       if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
-                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
-                       if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
-                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
-               }
-               stats
-       }
 
-       /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
-       fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
-               let context = self;
-               let mut stats = HTLCStats {
-                       pending_htlcs: context.pending_outbound_htlcs.len() as u32,
-                       pending_htlcs_value_msat: 0,
-                       on_counterparty_tx_dust_exposure_msat: 0,
-                       on_holder_tx_dust_exposure_msat: 0,
-                       holding_cell_msat: 0,
-                       on_holder_tx_holding_cell_htlcs_count: 0,
-               };
+               let mut on_holder_tx_dust_exposure_msat = 0;
+               let mut on_counterparty_tx_dust_exposure_msat = 0;
 
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                       (0, 0)
-               } else {
-                       let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
-               };
-               let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
-               let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
-               for ref htlc in context.pending_outbound_htlcs.iter() {
-                       stats.pending_htlcs_value_msat += htlc.amount_msat;
-                       if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
-                               stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
-                       }
-                       if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
-                               stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+               let mut on_counterparty_tx_offered_nondust_htlcs = 0;
+               let mut on_counterparty_tx_accepted_nondust_htlcs = 0;
+
+               let mut pending_inbound_htlcs_value_msat = 0;
+
+               {
+                       let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+                       let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+                       for ref htlc in context.pending_inbound_htlcs.iter() {
+                               pending_inbound_htlcs_value_msat += htlc.amount_msat;
+                               if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+                                       on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+                               } else {
+                                       on_counterparty_tx_offered_nondust_htlcs += 1;
+                               }
+                               if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+                                       on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+                               }
                        }
                }
 
-               for update in context.holding_cell_htlc_updates.iter() {
-                       if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
-                               stats.pending_htlcs += 1;
-                               stats.pending_htlcs_value_msat += amount_msat;
-                               stats.holding_cell_msat += amount_msat;
-                               if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
-                                       stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
-                               }
-                               if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
-                                       stats.on_holder_tx_dust_exposure_msat += amount_msat;
+               let mut pending_outbound_htlcs_value_msat = 0;
+               let mut outbound_holding_cell_msat = 0;
+               let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0;
+               let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len();
+               {
+                       let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
+                       let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
+                       for ref htlc in context.pending_outbound_htlcs.iter() {
+                               pending_outbound_htlcs_value_msat += htlc.amount_msat;
+                               if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
+                                       on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
                                } else {
-                                       stats.on_holder_tx_holding_cell_htlcs_count += 1;
+                                       on_counterparty_tx_accepted_nondust_htlcs += 1;
+                               }
+                               if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
+                                       on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+                               }
+                       }
+
+                       for update in context.holding_cell_htlc_updates.iter() {
+                               if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
+                                       pending_outbound_htlcs += 1;
+                                       pending_outbound_htlcs_value_msat += amount_msat;
+                                       outbound_holding_cell_msat += amount_msat;
+                                       if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
+                                               on_counterparty_tx_dust_exposure_msat += amount_msat;
+                                       } else {
+                                               on_counterparty_tx_accepted_nondust_htlcs += 1;
+                                       }
+                                       if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
+                                               on_holder_tx_dust_exposure_msat += amount_msat;
+                                       } else {
+                                               on_holder_tx_outbound_holding_cell_htlcs_count += 1;
+                                       }
                                }
                        }
                }
-               stats
+
+               // Include any mining "excess" fees in the dust calculation
+               let excess_feerate_opt = outbound_feerate_update
+                       .or(self.pending_update_fee.map(|(fee, _)| fee))
+                       .unwrap_or(self.feerate_per_kw)
+                       .checked_sub(dust_exposure_limiting_feerate);
+               if let Some(excess_feerate) = excess_feerate_opt {
+                       let on_counterparty_tx_nondust_htlcs =
+                               on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs;
+                       on_counterparty_tx_dust_exposure_msat +=
+                               commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type);
+                       if !self.channel_type.supports_anchors_zero_fee_htlc_tx() {
+                               on_counterparty_tx_dust_exposure_msat +=
+                                       on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type)
+                                       * excess_feerate as u64 / 1000;
+                               on_counterparty_tx_dust_exposure_msat +=
+                                       on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type)
+                                       * excess_feerate as u64 / 1000;
+                       }
+               }
+
+               HTLCStats {
+                       pending_inbound_htlcs: self.pending_inbound_htlcs.len(),
+                       pending_outbound_htlcs,
+                       pending_inbound_htlcs_value_msat,
+                       pending_outbound_htlcs_value_msat,
+                       on_counterparty_tx_dust_exposure_msat,
+                       on_holder_tx_dust_exposure_msat,
+                       outbound_holding_cell_msat,
+                       on_holder_tx_outbound_holding_cell_htlcs_count,
+               }
        }
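
The new excess-feerate term is the subtle part of the merged stats function: any feerate beyond the dust-exposure-limiting feerate is itself counted as dust exposure on the counterparty transaction, attributed to the non-dust HTLCs. The sketch below restates that computation standalone; the weight constants are illustrative approximations, not necessarily the crate's exact values.

```rust
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;      // illustrative
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;  // illustrative
const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;         // illustrative
const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;         // illustrative

fn excess_fee_dust_exposure_msat(
    current_feerate_per_kw: u32, limiting_feerate_per_kw: u32,
    accepted_nondust_htlcs: u64, offered_nondust_htlcs: u64, anchors_zero_fee_htlc_tx: bool,
) -> u64 {
    let excess = match current_feerate_per_kw.checked_sub(limiting_feerate_per_kw) {
        Some(e) => e as u64,
        None => return 0, // no excess feerate, nothing extra to count
    };
    let nondust = accepted_nondust_htlcs + offered_nondust_htlcs;
    // Commitment-tx fee at the excess feerate, rounded down to whole satoshis, expressed in msat.
    let mut exposure =
        (COMMITMENT_TX_BASE_WEIGHT + nondust * COMMITMENT_TX_WEIGHT_PER_HTLC) * excess / 1000 * 1000;
    if !anchors_zero_fee_htlc_tx {
        // Without zero-fee-HTLC anchors, the second-stage HTLC transactions also pay this excess.
        exposure += accepted_nondust_htlcs * HTLC_SUCCESS_TX_WEIGHT * excess / 1000;
        exposure += offered_nondust_htlcs * HTLC_TIMEOUT_TX_WEIGHT * excess / 1000;
    }
    exposure
}
```
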
 
        /// Returns information on all pending inbound HTLCs.
@@ -2922,9 +2985,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        where F::Target: FeeEstimator
        {
                let context = &self;
-               // Note that we have to handle overflow due to the above case.
-               let inbound_stats = context.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = context.get_outbound_pending_htlc_stats(None);
+               // Note that, in general, we have to handle overflow here due to the case
+               // mentioned in the docs.
+
+               let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator);
+               let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
 
                let mut balance_msat = context.value_to_self_msat;
                for ref htlc in context.pending_inbound_htlcs.iter() {
@@ -2932,10 +2997,10 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                balance_msat += htlc.amount_msat;
                        }
                }
-               balance_msat -= outbound_stats.pending_htlcs_value_msat;
+               balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat;
 
                let outbound_capacity_msat = context.value_to_self_msat
-                               .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+                               .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat)
                                .saturating_sub(
                                        context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
 
@@ -2995,7 +3060,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                        let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
                        let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
-                               .saturating_sub(inbound_stats.pending_htlcs_value_msat);
+                               .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat);
 
                        if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
                                // If another HTLC's fee would reduce the remote's balance below the reserve limit
@@ -3012,7 +3077,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                // send above the dust limit (as the router can always overpay to meet the dust limit).
                let mut remaining_msat_below_dust_exposure_limit = None;
                let mut dust_exposure_dust_limit_msat = 0;
-               let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
 
                let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
@@ -3021,18 +3086,32 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
                         context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
                };
-               let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
+
+               let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate);
+               if let Some(excess_feerate) = excess_feerate_opt {
+                       let htlc_dust_exposure_msat =
+                               per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type);
+                       let nondust_htlc_counterparty_tx_dust_exposure =
+                               htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
+                       if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+                               // If adding an extra HTLC would put us over the dust limit in total fees, we cannot
+                               // send any non-dust HTLCs.
+                               available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000);
+                       }
+               }
+
+               if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) {
+                       // Note that we don't use the `counterparty_tx_dust_exposure` (with
+                       // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs.
                        remaining_msat_below_dust_exposure_limit =
-                               Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+                               Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
                }
 
-               let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
+               if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
                        remaining_msat_below_dust_exposure_limit = Some(cmp::min(
                                remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-                               max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
+                               max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat)));
                        dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
                }
 
@@ -3045,16 +3124,16 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                }
 
                available_capacity_msat = cmp::min(available_capacity_msat,
-                       context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+                       context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat);
 
-               if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
+               if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize {
                        available_capacity_msat = 0;
                }
 
                AvailableBalances {
                        inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
                                        - context.value_to_self_msat as i64
-                                       - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+                                       - htlc_stats.pending_inbound_htlcs_value_msat as i64
                                        - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
                                0) as u64,
                        outbound_capacity_msat,
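
As a worked illustration of the inbound-capacity arithmetic above (the helper and its parameters are restatements for the example, not the crate's API): a 1_000_000 sat channel where we hold 600_000_000 msat, with 50_000_000 msat of pending inbound HTLCs and a 10_000 sat reserve we require of the counterparty, leaves 340_000_000 msat of inbound capacity.

```rust
fn inbound_capacity_msat(
    channel_value_satoshis: u64,
    value_to_self_msat: u64,
    pending_inbound_htlcs_value_msat: u64,
    holder_selected_reserve_satoshis: u64,
) -> u64 {
    // Clamp at zero: the counterparty may already be at or below the reserve we require of them.
    (channel_value_satoshis as i64 * 1000
        - value_to_self_msat as i64
        - pending_inbound_htlcs_value_msat as i64
        - holder_selected_reserve_satoshis as i64 * 1000)
        .max(0) as u64
}

// 1_000_000 * 1000 - 600_000_000 - 50_000_000 - 10_000 * 1000 = 340_000_000 msat
```
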
@@ -3501,7 +3580,7 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe
 ///
 /// This is used both for outbound and inbound channels and has lower bound
 /// of `dust_limit_satoshis`.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
        // Fixed at 1% of channel value by spec.
        let (q, _) = channel_value_satoshis.overflowing_div(100);
@@ -3523,8 +3602,19 @@ pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
 }
 
+pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 {
+       // Note that we need to divide before multiplying to round properly,
+       // since the lowest denomination of bitcoin on-chain is the satoshi.
+       let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000;
+       if channel_type_features.supports_anchors_zero_fee_htlc_tx() {
+               commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000
+       } else {
+               commitment_tx_fee
+       }
+}
+
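
A small worked example for the new helper: on the non-anchor path, each outbound HTLC adds `COMMITMENT_TX_WEIGHT_PER_HTLC` worth of commitment fee, and dividing before multiplying keeps the result a whole number of satoshis expressed in msat. The 172 WU figure below is assumed for that constant in this sketch.

```rust
// At 2_500 sat/KW: 172 * 2_500 / 1_000 = 430 sat per HTLC, i.e. 430_000 msat.
fn per_htlc_commit_fee_msat(feerate_per_kw: u64) -> u64 {
    let commitment_tx_weight_per_htlc: u64 = 172; // assumed per-HTLC commitment weight
    commitment_tx_weight_per_htlc * feerate_per_kw / 1000 * 1000
}
```
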
 /// Context for dual-funded channels.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 pub(super) struct DualFundingChannelContext {
        /// The amount in satoshis we will be contributing to the channel.
        pub our_funding_satoshis: u64,
@@ -3541,7 +3631,7 @@ pub(super) struct DualFundingChannelContext {
 // Counterparty designates channel data owned by the other channel participant entity.
 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
        pub context: ChannelContext<SP>,
-       #[cfg(dual_funding)]
+       #[cfg(any(dual_funding, splicing))]
        pub dual_funding_channel_context: Option<DualFundingChannelContext>,
 }
 
@@ -3612,7 +3702,7 @@ impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
 
 impl<SP: Deref> Channel<SP> where
        SP::Target: SignerProvider,
-       <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
+       <SP::Target as SignerProvider>::EcdsaSigner: EcdsaChannelSigner
 {
        fn check_remote_fee<F: Deref, L: Deref>(
                channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
@@ -3634,7 +3724,12 @@ impl<SP: Deref> Channel<SP> where
                                        return Ok(());
                                }
                        }
-                       return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
+                       return Err(ChannelError::Close((format!(
+                               "Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit
+                       ), ClosureReason::PeerFeerateTooLow {
+                               peer_feerate_sat_per_kw: feerate_per_kw,
+                               required_feerate_sat_per_kw: lower_limit,
+                       })));
                }
                Ok(())
        }
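
The error-shape change in this hunk (a human-readable string plus a machine-readable reason carried together in the close variant) can be sketched standalone as below; the type names are illustrative stand-ins, not the crate's definitions.

```rust
enum CloseReason {
    PeerFeerateTooLow { peer_feerate_sat_per_kw: u32, required_feerate_sat_per_kw: u32 },
}

enum ChanError {
    Close((String, CloseReason)),
}

fn reject_low_feerate(feerate_per_kw: u32, lower_limit: u32) -> Result<(), ChanError> {
    if feerate_per_kw < lower_limit {
        return Err(ChanError::Close((
            format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}",
                feerate_per_kw, lower_limit),
            CloseReason::PeerFeerateTooLow {
                peer_feerate_sat_per_kw: feerate_per_kw,
                required_feerate_sat_per_kw: lower_limit,
            },
        )));
    }
    Ok(())
}
```
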
@@ -4084,7 +4179,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                        // If we reconnected before sending our `channel_ready` they may still resend theirs.
                        ChannelState::ChannelReady(_) => check_reconnection = true,
-                       _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+                       _ => return Err(ChannelError::close("Peer sent a channel_ready at a strange time".to_owned())),
                }
                if check_reconnection {
                        // They probably disconnected/reconnected and re-sent the channel_ready, which is
@@ -4107,7 +4202,7 @@ impl<SP: Deref> Channel<SP> where
                                                ).expect("We already advanced, so previous secret keys should have been validated already")))
                                };
                        if expected_point != Some(msg.next_per_commitment_point) {
-                               return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+                               return Err(ChannelError::close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
                }
@@ -4120,35 +4215,37 @@ impl<SP: Deref> Channel<SP> where
                Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
        }
 
-       pub fn update_add_htlc(
+       pub fn update_add_htlc<F: Deref>(
                &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
-       ) -> Result<(), ChannelError> {
+               fee_estimator: &LowerBoundedFeeEstimator<F>,
+       ) -> Result<(), ChannelError> where F::Target: FeeEstimator {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
                if self.context.channel_state.is_remote_shutdown_sent() {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
                }
                if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+                       return Err(ChannelError::close("Remote side tried to send more than the total value of the channel".to_owned()));
                }
                if msg.amount_msat == 0 {
-                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+                       return Err(ChannelError::close("Remote side tried to send a 0-msat HTLC".to_owned()));
                }
                if msg.amount_msat < self.context.holder_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+                       return Err(ChannelError::close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
                }
 
-               let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-               if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
-                       return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+               let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
+               let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
+               if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
+                       return Err(ChannelError::close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
                }
-               if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+               if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
                }
 
                // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
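
Restated standalone (with plain parameters rather than the channel context), the two inbound admission checks above read as follows; the helper name is illustrative.

```rust
fn check_inbound_htlc_limits(
    pending_inbound_htlcs: usize,
    pending_inbound_value_msat: u64,
    new_htlc_amount_msat: u64,
    holder_max_accepted_htlcs: u16,
    holder_max_htlc_value_in_flight_msat: u64,
) -> Result<(), String> {
    // One more HTLC must not exceed the count we advertised to the peer...
    if pending_inbound_htlcs + 1 > holder_max_accepted_htlcs as usize {
        return Err(format!("Remote tried to push more than our max accepted HTLCs ({})",
            holder_max_accepted_htlcs));
    }
    // ...nor push the total inbound value over our max in-flight limit.
    if pending_inbound_value_msat + new_htlc_amount_msat > holder_max_htlc_value_in_flight_msat {
        return Err(format!("Remote HTLC add would put them over our max HTLC value ({})",
            holder_max_htlc_value_in_flight_msat));
    }
    Ok(())
}
```
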
@@ -4173,11 +4270,11 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let pending_value_to_self_msat =
-                       self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+                       self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
                let pending_remote_value_msat =
                        self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
                if pending_remote_value_msat < msg.amount_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+                       return Err(ChannelError::close("Remote HTLC add would overdraw remaining funds".to_owned()));
                }
 
                // Check that the remote can afford to pay for this HTLC on-chain at the current
@@ -4193,10 +4290,10 @@ impl<SP: Deref> Channel<SP> where
                                0
                        };
                        if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
-                               return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+                               return Err(ChannelError::close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
                        };
                        if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
-                               return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+                               return Err(ChannelError::close("Remote HTLC add would put them under remote reserve value".to_owned()));
                        }
                }
 
@@ -4210,14 +4307,14 @@ impl<SP: Deref> Channel<SP> where
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
                        let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                        if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
-                               return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+                               return Err(ChannelError::close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
                        }
                }
                if self.context.next_counterparty_htlc_id != msg.htlc_id {
-                       return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+                       return Err(ChannelError::close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
                }
                if msg.cltv_expiry >= 500000000 {
-                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+                       return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
                if self.context.channel_state.is_local_shutdown_sent() {
@@ -4251,32 +4348,32 @@ impl<SP: Deref> Channel<SP> where
                                        Some(payment_preimage) => {
                                                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
                                                if payment_hash != htlc.payment_hash {
-                                                       return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+                                                       return Err(ChannelError::close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
                                                }
                                                OutboundHTLCOutcome::Success(Some(payment_preimage))
                                        }
                                };
                                match htlc.state {
                                        OutboundHTLCState::LocalAnnounced(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+                                               return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
                                        OutboundHTLCState::Committed => {
                                                htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
                                        },
                                        OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+                                               return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
                                }
                                return Ok(htlc);
                        }
                }
-               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+               Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
@@ -4284,10 +4381,10 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fail HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -4296,10 +4393,10 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -4310,20 +4407,20 @@ impl<SP: Deref> Channel<SP> where
                where L::Target: Logger
        {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+                       return Err(ChannelError::close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let funding_script = self.context.get_funding_redeemscript();
 
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let keys = self.context.build_holder_transaction_keys();
 
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+               let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger);
                let commitment_txid = {
                        let trusted_tx = commitment_stats.tx.trust();
                        let bitcoin_tx = trusted_tx.built_transaction();
@@ -4334,7 +4431,7 @@ impl<SP: Deref> Channel<SP> where
                                log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
                                log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
-                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+                               return Err(ChannelError::close("Invalid commitment tx signature from peer".to_owned()));
                        }
                        bitcoin_tx.txid
                };
@@ -4349,7 +4446,7 @@ impl<SP: Deref> Channel<SP> where
                        debug_assert!(!self.context.is_outbound());
                        let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
                        if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+                               return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned()));
                        }
                }
                #[cfg(any(test, fuzzing))]
@@ -4371,7 +4468,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+                       return Err(ChannelError::close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
                }
 
                // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
@@ -4399,12 +4496,12 @@ impl<SP: Deref> Channel<SP> where
 
                                let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
                                let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
-                               let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
+                               let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap()[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
                                        encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
                                if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
-                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+                                       return Err(ChannelError::close("Invalid HTLC tx signature from peer".to_owned()));
                                }
                                if !separate_nondust_htlc_sources {
                                        htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
@@ -4429,7 +4526,7 @@ impl<SP: Deref> Channel<SP> where
                );
 
                self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+                       .map_err(|_| ChannelError::close("Failed to validate our commitment".to_owned()))?;
 
                // Update state now that we've passed all the can-fail calls...
                let mut need_commitment = false;
@@ -4486,7 +4583,7 @@ impl<SP: Deref> Channel<SP> where
                        channel_id: Some(self.context.channel_id()),
                };
 
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
                self.context.expecting_peer_commitment_signed = false;
                // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
                // build_commitment_no_status_check() next which will reset this to RAAFirst.
@@ -4682,20 +4779,20 @@ impl<SP: Deref> Channel<SP> where
        where F::Target: FeeEstimator, L::Target: Logger,
        {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+                       return Err(ChannelError::close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
 
                if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
                        if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
-                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+                               return Err(ChannelError::close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
                        }
                }
 
@@ -4707,7 +4804,7 @@ impl<SP: Deref> Channel<SP> where
                        // lot of work, and there's some chance this is all a misunderstanding anyway.
                        // We have to do *something*, though, since our signer may get mad at us for otherwise
                        // jumping a remote commitment number, so best to just force-close and move on.
-                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+                       return Err(ChannelError::close("Received an unexpected revoke_and_ack".to_owned()));
                }
 
                #[cfg(any(test, fuzzing))]
@@ -4721,7 +4818,7 @@ impl<SP: Deref> Channel<SP> where
                                ecdsa.validate_counterparty_revocation(
                                        self.context.cur_counterparty_commitment_transaction_number + 1,
                                        &secret
-                               ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+                               ).map_err(|_| ChannelError::close("Failed to validate revocation from peer".to_owned()))?;
                        },
                        // TODO (taproot|arik)
                        #[cfg(taproot)]
@@ -4729,7 +4826,7 @@ impl<SP: Deref> Channel<SP> where
                };
 
                self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
-                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+                       .map_err(|_| ChannelError::close("Previous secrets did not match new one".to_owned()))?;
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
@@ -4995,12 +5092,12 @@ impl<SP: Deref> Channel<SP> where
                }
 
                // Before proposing a feerate update, check that we can actually afford the new fee.
-               let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
-               let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
-               let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
-               let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+               let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
+               let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate);
+               let keys = self.context.build_holder_transaction_keys();
+               let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, true, logger);
+               let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
+               let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
                if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
                        //TODO: auto-close after a number of failures?
                        log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
@@ -5008,14 +5105,12 @@ impl<SP: Deref> Channel<SP> where
                }
 
                // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
-               let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-               let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-               if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
+               if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
-               if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+               if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
                        log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
                        return None;
                }
@@ -5182,12 +5277,7 @@ impl<SP: Deref> Channel<SP> where
                        assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
                                "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
                        self.context.monitor_pending_channel_ready = false;
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-                       Some(msgs::ChannelReady {
-                               channel_id: self.context.channel_id(),
-                               next_per_commitment_point,
-                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                       })
+                       Some(self.get_channel_ready())
                } else { None };
 
                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
@@ -5234,34 +5324,50 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
+       pub fn check_for_stale_feerate<L: Logger>(&mut self, logger: &L, min_feerate: u32) -> Result<(), ClosureReason> {
+               if self.context.is_outbound() {
+                       // While it's possible our fee is too low for an outbound channel because we've been
+                       // unable to increase the fee, we don't try to force-close directly here.
+                       return Ok(());
+               }
+               if self.context.feerate_per_kw < min_feerate {
+                       log_info!(logger,
+                               "Closing channel as feerate of {} is below required {} (the minimum required rate over the past day)",
+                               self.context.feerate_per_kw, min_feerate
+                       );
+                       Err(ClosureReason::PeerFeerateTooLow {
+                               peer_feerate_sat_per_kw: self.context.feerate_per_kw,
+                               required_feerate_sat_per_kw: min_feerate,
+                       })
+               } else {
+                       Ok(())
+               }
+       }
+
        pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
                where F::Target: FeeEstimator, L::Target: Logger
        {
                if self.context.is_outbound() {
-                       return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+                       return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
                Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
 
                self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
                self.context.update_time_counter += 1;
                // Check that we won't be pushed over our dust exposure limit by the feerate increase.
-               if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-                       let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
-                       let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-                       let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-                       let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-                       if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
-                               return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
-                                       msg.feerate_per_kw, holder_tx_dust_exposure)));
-                       }
-                       if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
-                               return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
-                                       msg.feerate_per_kw, counterparty_tx_dust_exposure)));
-                       }
+               let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
+               let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
+               if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
+                       return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+                               msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
+               }
+               if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
+                       return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+                               msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
                }
                Ok(())
        }
@@ -5277,7 +5383,7 @@ impl<SP: Deref> Channel<SP> where
                        self.context.get_funding_signed_msg(logger).1
                } else { None };
                let channel_ready = if funding_signed.is_some() {
-                       self.check_get_channel_ready(0)
+                       self.check_get_channel_ready(0, logger)
                } else { None };
 
                log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
@@ -5293,8 +5399,11 @@ impl<SP: Deref> Channel<SP> where
        }
 
        fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
-               let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-               let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+               debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER + 2);
+               // TODO: handle non-available case when get_per_commitment_point becomes async
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               let next_per_commitment_point = self.context.holder_commitment_point.current_point();
+               let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2);
                msgs::RevokeAndACK {
                        channel_id: self.context.channel_id,
                        per_commitment_secret,
@@ -5419,21 +5528,21 @@ impl<SP: Deref> Channel<SP> where
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
                        // just close here instead of trying to recover.
-                       return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+                       return Err(ChannelError::close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
                }
 
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
+                       return Err(ChannelError::close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
 
-               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1;
                if msg.next_remote_commitment_number > 0 {
                        let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
                        let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
-                               .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+                               .map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
                        if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
-                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+                               return Err(ChannelError::close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                        }
                        if msg.next_remote_commitment_number > our_commitment_transaction {
                                macro_rules! log_and_panic {
@@ -5477,7 +5586,7 @@ impl<SP: Deref> Channel<SP> where
                        if !self.context.channel_state.is_our_channel_ready() ||
                                        self.context.channel_state.is_monitor_update_in_progress() {
                                if msg.next_remote_commitment_number != 0 {
-                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
+                                       return Err(ChannelError::close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
@@ -5489,13 +5598,8 @@ impl<SP: Deref> Channel<SP> where
                        }
 
                        // We have OurChannelReady set!
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                        return Ok(ReestablishResponses {
-                               channel_ready: Some(msgs::ChannelReady {
-                                       channel_id: self.context.channel_id(),
-                                       next_per_commitment_point,
-                                       short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                               }),
+                               channel_ready: Some(self.get_channel_ready()),
                                raa: None, commitment_update: None,
                                order: RAACommitmentOrder::CommitmentFirst,
                                shutdown_msg, announcement_sigs,
@@ -5515,7 +5619,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                } else {
                        debug_assert!(false, "All values should have been handled in the four cases above");
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
                                msg.next_remote_commitment_number,
                                our_commitment_transaction
@@ -5532,14 +5636,9 @@ impl<SP: Deref> Channel<SP> where
                }
                let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
 
-               let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+               let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() == 1 {
                        // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-                       Some(msgs::ChannelReady {
-                               channel_id: self.context.channel_id(),
-                               next_per_commitment_point,
-                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                       })
+                       Some(self.get_channel_ready())
                } else { None };
 
                if msg.next_local_commitment_number == next_counterparty_commitment_number {
@@ -5578,13 +5677,13 @@ impl<SP: Deref> Channel<SP> where
                                })
                        }
                } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
-                       Err(ChannelError::Close(format!(
+                       Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
                                msg.next_local_commitment_number,
                                next_counterparty_commitment_number,
                        )))
                } else {
-                       Err(ChannelError::Close(format!(
+                       Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
                                msg.next_local_commitment_number,
                                next_counterparty_commitment_number,
@@ -5659,7 +5758,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
                if self.closing_negotiation_ready() {
                        if self.context.closing_signed_in_flight {
-                               return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+                               return Err(ChannelError::close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
                        } else {
                                self.context.closing_signed_in_flight = true;
                        }
@@ -5704,7 +5803,7 @@ impl<SP: Deref> Channel<SP> where
                        ChannelSignerType::Ecdsa(ecdsa) => {
                                let sig = ecdsa
                                        .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
-                                       .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+                                       .map_err(|()| ChannelError::close("Failed to get signature for closing transaction.".to_owned()))?;
 
                                self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
                                Ok((Some(msgs::ClosingSigned {
@@ -5750,17 +5849,17 @@ impl<SP: Deref> Channel<SP> where
        ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        {
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_pre_funded_state() {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
-                       return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+                       return Err(ChannelError::close("Peer sent shutdown pre-funding generation".to_owned()));
                }
                for htlc in self.context.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
-                               return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+                               return Err(ChannelError::close("Got shutdown with remote pending HTLCs".to_owned()));
                        }
                }
                assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
@@ -5788,10 +5887,10 @@ impl<SP: Deref> Channel<SP> where
                                assert!(send_shutdown);
                                let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
                                        Ok(scriptpubkey) => scriptpubkey,
-                                       Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+                                       Err(_) => return Err(ChannelError::close("Failed to get shutdown scriptpubkey".to_owned())),
                                };
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
-                                       return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+                                       return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
                                }
                                self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
@@ -5873,20 +5972,20 @@ impl<SP: Deref> Channel<SP> where
                where F::Target: FeeEstimator
        {
                if !self.context.channel_state.is_both_sides_shutdown() {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+                       return Err(ChannelError::close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
                }
                if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+                       return Err(ChannelError::close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
                }
                if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
-                       return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+                       return Err(ChannelError::close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
                }
 
                if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
-                       return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+                       return Err(ChannelError::close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
                }
 
                if self.context.channel_state.is_monitor_update_in_progress() {
@@ -5897,7 +5996,7 @@ impl<SP: Deref> Channel<SP> where
                let funding_redeemscript = self.context.get_funding_redeemscript();
                let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
                if used_total_fee != msg.fee_satoshis {
-                       return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+                       return Err(ChannelError::close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
                }
                let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
 
@@ -5913,8 +6012,8 @@ impl<SP: Deref> Channel<SP> where
                };
 
                for outp in closing_tx.trust().built_transaction().output.iter() {
-                       if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
-                               return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+                       if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) {
+                               return Err(ChannelError::close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
                        }
                }
 
@@ -5960,7 +6059,7 @@ impl<SP: Deref> Channel<SP> where
                                        ChannelSignerType::Ecdsa(ecdsa) => {
                                                let sig = ecdsa
                                                        .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
-                                                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+                                                       .map_err(|_| ChannelError::close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
                                                                closure_reason,
@@ -6002,7 +6101,7 @@ impl<SP: Deref> Channel<SP> where
 
                if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
                        if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
-                               return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+                               return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
                        }
                        if max_fee_satoshis < our_min_fee {
                                return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
@@ -6018,7 +6117,7 @@ impl<SP: Deref> Channel<SP> where
                                propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
                        } else {
                                if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
-                                       return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+                                       return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
                                                msg.fee_satoshis, our_min_fee, our_max_fee)));
                                }
                                // The proposed fee is in our acceptable range, accept it and broadcast!
@@ -6034,7 +6133,7 @@ impl<SP: Deref> Channel<SP> where
                                        } else if last_fee < our_max_fee {
                                                propose_fee!(our_max_fee);
                                        } else {
-                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+                                               return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
                                        }
                                } else {
                                        if msg.fee_satoshis > our_min_fee {
@@ -6042,7 +6141,7 @@ impl<SP: Deref> Channel<SP> where
                                        } else if last_fee > our_min_fee {
                                                propose_fee!(our_min_fee);
                                        } else {
-                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+                                               return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
                                        }
                                }
                        } else {
@@ -6105,9 +6204,9 @@ impl<SP: Deref> Channel<SP> where
                        return Err(("Shutdown was already sent", 0x4000|8))
                }
 
-               let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
-               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
+               let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
                let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
                        (0, 0)
                } else {
@@ -6117,17 +6216,27 @@ impl<SP: Deref> Channel<SP> where
                };
                let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-                       let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+                       let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
                        if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
                                        on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
                                return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
                        }
+               } else {
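+                       // The HTLC itself isn't trimmed-to-dust, but the incremental commitment
+                       // transaction fee it implies on the counterparty's transaction still
+                       // counts towards our dust exposure.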
+                       let htlc_dust_exposure_msat =
+                               per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type);
+                       let counterparty_tx_dust_exposure =
+                               htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
+                       if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+                               log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx",
+                                       counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat);
+                               return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7))
+                       }
                }
 
                let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
                if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-                       let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+                       let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
                        if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
                                log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
                                        on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
@@ -6151,7 +6260,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let pending_value_to_self_msat =
-                       self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+                       self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
                let pending_remote_value_msat =
                        self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
 
@@ -6175,7 +6284,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
-               self.context.cur_holder_commitment_transaction_number + 1
+               self.context.holder_commitment_point.transaction_number() + 1
        }
 
        pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
@@ -6251,6 +6360,26 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
+       /// On startup, it's possible we detect some monitor updates have actually completed (and the
+       /// ChannelManager was simply stale). In that case, we should simply drop them, which we do
+       /// here after logging them.
+       pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
+               let channel_id = self.context.channel_id();
+               self.context.blocked_monitor_updates.retain(|update| {
+                       if update.update.update_id <= loaded_mon_update_id {
+                               log_info!(
+                                       logger,
+                                       "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
+                                       update.update.update_id,
+                                       channel_id,
+                               );
+                               false
+                       } else {
+                               true
+                       }
+               });
+       }
+
        pub fn blocked_monitor_updates_pending(&self) -> usize {
                self.context.blocked_monitor_updates.len()
        }
@@ -6270,7 +6399,7 @@ impl<SP: Deref> Channel<SP> where
                        debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
                        return true;
                }
-               if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+               if self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 &&
                        self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
                        // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
                        // waiting for the initial monitor persistence. Thus, we check if our commitment
@@ -6329,7 +6458,9 @@ impl<SP: Deref> Channel<SP> where
                self.context.channel_update_status = status;
        }
 
-       fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+       fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
+               where L::Target: Logger
+       {
                // Called:
                //  * always when a new block/transactions are confirmed with the new height
                //  * when funding is signed with a height of 0
@@ -6349,6 +6480,8 @@ impl<SP: Deref> Channel<SP> where
                // If we're still pending the signature on a funding transaction, then we're not ready to send a
                // channel_ready yet.
                if self.context.signer_pending_funding {
+                       // TODO: set signer_pending_channel_ready
+                       log_debug!(logger, "Can't produce channel_ready: the signer is pending funding.");
                        return None;
                }
 
@@ -6381,22 +6514,35 @@ impl<SP: Deref> Channel<SP> where
                        false
                };
 
-               if need_commitment_update {
-                       if !self.context.channel_state.is_monitor_update_in_progress() {
-                               if !self.context.channel_state.is_peer_disconnected() {
-                                       let next_per_commitment_point =
-                                               self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
-                                       return Some(msgs::ChannelReady {
-                                               channel_id: self.context.channel_id,
-                                               next_per_commitment_point,
-                                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                                       });
-                               }
-                       } else {
-                               self.context.monitor_pending_channel_ready = true;
-                       }
+               if !need_commitment_update {
+                       log_debug!(logger, "Not producing channel_ready: we do not need a commitment update");
+                       return None;
+               }
+
+               if self.context.channel_state.is_monitor_update_in_progress() {
+                       log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. Setting monitor_pending_channel_ready.");
+                       self.context.monitor_pending_channel_ready = true;
+                       return None;
+               }
+
+               if self.context.channel_state.is_peer_disconnected() {
+                       log_debug!(logger, "Not producing channel_ready: the peer is disconnected.");
+                       return None;
+               }
+
+               // TODO: when get_per_commitment_point becomes async, check if the point is
+               // available, if not, set signer_pending_channel_ready and return None
+
+               Some(self.get_channel_ready())
+       }
+
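+       /// Builds the [`msgs::ChannelReady`] message to send to our peer, based on the currently
+       /// available holder per-commitment point.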
+       fn get_channel_ready(&self) -> msgs::ChannelReady {
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               msgs::ChannelReady {
+                       channel_id: self.context.channel_id(),
+                       next_per_commitment_point: self.context.holder_commitment_point.current_point(),
+                       short_channel_id_alias: Some(self.context.outbound_scid_alias),
                }
-               None
        }
 
        /// When a transaction is confirmed, we check whether it is or spends the funding transaction
@@ -6418,8 +6564,8 @@ impl<SP: Deref> Channel<SP> where
                                if self.context.funding_tx_confirmation_height == 0 {
                                        if tx.txid() == funding_txo.txid {
                                                let txo_idx = funding_txo.index as usize;
-                                               if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
-                                                               tx.output[txo_idx].value != self.context.channel_value_satoshis {
+                                               if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_p2wsh() ||
+                                                               tx.output[txo_idx].value.to_sat() != self.context.channel_value_satoshis {
                                                        if self.context.is_outbound() {
                                                                // If we generated the funding transaction and it doesn't match what it
                                                                // should, the client is really broken and we should just panic and
@@ -6434,7 +6580,7 @@ impl<SP: Deref> Channel<SP> where
                                                        return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
                                                } else {
                                                        if self.context.is_outbound() {
-                                                               if !tx.is_coin_base() {
+                                                               if !tx.is_coinbase() {
                                                                        for input in tx.input.iter() {
                                                                                if input.witness.is_empty() {
                                                                                        // We generated a malleable funding transaction, implying we've
@@ -6454,7 +6600,7 @@ impl<SP: Deref> Channel<SP> where
                                                }
                                                // If this is a coinbase transaction and not a 0-conf channel
                                                // we should update our min_depth to 100 to handle coinbase maturity
-                                               if tx.is_coin_base() &&
+                                               if tx.is_coinbase() &&
                                                        self.context.minimum_depth.unwrap_or(0) > 0 &&
                                                        self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
                                                        self.context.minimum_depth = Some(COINBASE_MATURITY);
@@ -6463,7 +6609,7 @@ impl<SP: Deref> Channel<SP> where
                                        // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
-                                       if let Some(channel_ready) = self.check_get_channel_ready(height) {
+                                       if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
                                                log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                                                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
                                                msgs = (Some(channel_ready), announcement_sigs);
@@ -6529,7 +6675,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
 
-               if let Some(channel_ready) = self.check_get_channel_ready(height) {
+               if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
                        let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
                                self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                        } else { None };
@@ -6762,12 +6908,12 @@ impl<SP: Deref> Channel<SP> where
                let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
 
                if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
                                 &announcement, self.context.get_counterparty_node_id())));
                }
                if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
                                &announcement, self.context.counterparty_funding_pubkey())));
                }
@@ -6832,7 +6978,7 @@ impl<SP: Deref> Channel<SP> where
 
                        // next_local_commitment_number is the next commitment_signed number we expect to
                        // receive (indicating if they need to resend one that we missed).
-                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number(),
                        // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
                        // receive, however we track it by the next commitment number for a remote transaction
                        // (which is one further, as they always revoke previous commitment transaction, not
@@ -7384,7 +7530,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7398,7 +7544,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
                // We can skip this if it is a zero-conf channel.
-               if funding_transaction.is_coin_base() &&
+               if funding_transaction.is_coinbase() &&
                        self.context.minimum_depth.unwrap_or(0) > 0 &&
                        self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
                        self.context.minimum_depth = Some(COINBASE_MATURITY);
@@ -7436,6 +7582,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                Ok(self.get_open_channel(chain_hash))
        }
 
+       /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
+       pub fn is_resumable(&self) -> bool {
+               !self.context.have_received_message() &&
+                       self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER
+       }
+
        pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
                if !self.context.is_outbound() {
                        panic!("Tried to open a channel for an inbound channel?");
@@ -7444,11 +7596,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        panic!("Cannot generate an open_channel after we've moved forward");
                }
 
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Tried to send an open_channel for a channel that has already advanced");
                }
 
-               let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               let first_per_commitment_point = self.context.holder_commitment_point.current_point();
                let keys = self.context.get_holder_pubkeys();
 
                msgs::OpenChannel {
@@ -7481,136 +7634,11 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        }
 
        // Message handlers
-       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
-               let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
-               // Check sanity of message fields:
-               if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
-               }
-               if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
-                       return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
-               }
-               if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
-                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
-               }
-               if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
-                               msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
-               if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
-               }
-               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
-               if msg.common_fields.to_self_delay > max_delay_acceptable {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
-               }
-               if msg.common_fields.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
-               }
-               if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
-               }
-
-               // Now check against optional parameters as set by config...
-               if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
-               }
-               if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
-               }
-               if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
-               }
-               if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
-               }
-               if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
-                       return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
-               }
-
-               if let Some(ty) = &msg.common_fields.channel_type {
-                       if *ty != self.context.channel_type {
-                               return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
-                       }
-               } else if their_features.supports_channel_type() {
-                       // Assume they've accepted the channel type as they said they understand it.
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       self.context.channel_type = channel_type.clone();
-                       self.context.channel_transaction_parameters.channel_type_features = channel_type;
-               }
-
-               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
-                       match &msg.common_fields.shutdown_scriptpubkey {
-                               &Some(ref script) => {
-                                       // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
-                                       if script.len() == 0 {
-                                               None
-                                       } else {
-                                               if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
-                                               }
-                                               Some(script.clone())
-                                       }
-                               },
-                               // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-                               &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
-                               }
-                       }
-               } else { None };
-
-               self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
-               self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
-               self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
-               self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
-               self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
-
-               if peer_limits.trust_own_funding_0conf {
-                       self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
-               } else {
-                       self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
-               }
-
-               let counterparty_pubkeys = ChannelPublicKeys {
-                       funding_pubkey: msg.common_fields.funding_pubkey,
-                       revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
-                       payment_point: msg.common_fields.payment_basepoint,
-                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
-                       htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
-               };
-
-               self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
-                       selected_contest_delay: msg.common_fields.to_self_delay,
-                       pubkeys: counterparty_pubkeys,
-               });
-
-               self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
-               self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
-               self.context.channel_state = ChannelState::NegotiatingFunding(
-                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
-               );
-               self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
-               Ok(())
+       pub fn accept_channel(
+               &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits,
+               their_features: &InitFeatures
+       ) -> Result<(), ChannelError> {
+               self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis)
        }
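
The inline body of `accept_channel` (the removed lines above) is collapsed into a call to `ChannelContext::do_accept_channel_checks`, passing the message's `common_fields` plus the V1-only `channel_reserve_satoshis`, so the same handshake-limit checks can presumably back both the V1 and V2 accept paths. As a rough illustration of the pattern (standalone stand-in types, not the real `ChannelHandshakeLimits` / accept-channel fields), the consolidated helper keeps the per-field comparisons and error messages that used to live inline:

```rust
// Illustrative sketch only: stand-in types showing the shape of the limit checks this
// patch moves into `do_accept_channel_checks`; the real function covers many more fields.
struct HandshakeLimits { max_htlc_minimum_msat: u64, min_max_htlc_value_in_flight_msat: u64 }
struct AcceptFields { htlc_minimum_msat: u64, max_htlc_value_in_flight_msat: u64 }

fn do_accept_channel_checks(limits: &HandshakeLimits, fields: &AcceptFields) -> Result<(), String> {
	if fields.htlc_minimum_msat > limits.max_htlc_minimum_msat {
		return Err(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})",
			fields.htlc_minimum_msat, limits.max_htlc_minimum_msat));
	}
	if fields.max_htlc_value_in_flight_msat < limits.min_max_htlc_value_in_flight_msat {
		return Err(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})",
			fields.max_htlc_value_in_flight_msat, limits.min_max_htlc_value_in_flight_msat));
	}
	Ok(())
}
```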
 
        /// Handles a funding_signed message from the remote end.
@@ -7622,14 +7650,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                L::Target: Logger
        {
                if !self.context.is_outbound() {
-                       return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned())));
                }
                if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
-                       return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned())));
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7643,15 +7671,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
                        &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
-               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+               let holder_signer = self.context.build_holder_transaction_keys();
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &holder_signer, true, false, logger).tx;
                {
                        let trusted_tx = initial_commitment_tx.trust();
                        let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                        let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
                        // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
-                               return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+                               return Err((self, ChannelError::close("Invalid funding_signed signature from peer".to_owned())));
                        }
                }
 
@@ -7666,12 +7694,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                let validated =
                        self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
                if validated.is_err() {
-                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+                       return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
                }
 
                let funding_redeemscript = self.context.get_funding_redeemscript();
                let funding_txo = self.context.get_funding_txo().unwrap();
-               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               let funding_txo_script = funding_redeemscript.to_p2wsh();
                let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
                let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
                let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
@@ -7697,18 +7725,18 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                } else {
                        self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                }
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
                log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 
                let mut channel = Channel {
                        context: self.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        dual_funding_channel_context: None,
                };
 
-               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
                Ok((channel, channel_monitor))
        }
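
This hunk swaps the bare `cur_holder_commitment_transaction_number` counter for a `HolderCommitmentPoint`, which carries the current and next per-commitment points alongside the number; `advance()` now replaces the manual `-= 1`. It also picks up the rust-bitcoin 0.32 rename of `to_v0_p2wsh()` to `to_p2wsh()` and widens the `dual_funding` cfg gate to `any(dual_funding, splicing)`. A minimal model of the state machine implied by the diff (the real type is defined elsewhere in the patch; `Point` stands in for `secp256k1::PublicKey` and `derive` for the signer's `get_per_commitment_point`):

```rust
// Simplified sketch of the holder-commitment-point tracking inferred from this diff.
type Point = [u8; 33];

enum HolderCommitmentPoint {
	Available { transaction_number: u64, current: Point, next: Point },
}

impl HolderCommitmentPoint {
	fn transaction_number(&self) -> u64 {
		match self { Self::Available { transaction_number, .. } => *transaction_number }
	}
	fn current_point(&self) -> Point {
		match self { Self::Available { current, .. } => *current }
	}
	fn next_point(&self) -> Option<Point> {
		match self { Self::Available { next, .. } => Some(*next) }
	}
	/// Move to the next commitment: the old `next` becomes `current`, the number drops by
	/// one, and a fresh `next` is derived, replacing the bare `transaction_number -= 1`.
	fn advance(&mut self, derive: impl Fn(u64) -> Point) {
		match self {
			Self::Available { transaction_number, current, next } => {
				*transaction_number -= 1;
				*current = *next;
				*next = derive(*transaction_number - 1);
			}
		}
	}
}
```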
@@ -7738,28 +7766,28 @@ pub(super) fn channel_type_from_open_channel(
 ) -> Result<ChannelTypeFeatures, ChannelError> {
        if let Some(channel_type) = &common_fields.channel_type {
                if channel_type.supports_any_optional_bits() {
-                       return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+                       return Err(ChannelError::close("Channel Type field contained optional bits - this is not allowed".to_owned()));
                }
 
                // We only support the channel types defined by the `ChannelManager` in
                // `provided_channel_type_features`. The channel type must always support
                // `static_remote_key`.
                if !channel_type.requires_static_remote_key() {
-                       return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+                       return Err(ChannelError::close("Channel Type was not understood - we require static remote key".to_owned()));
                }
                // Make sure we support all of the features behind the channel type.
                if !channel_type.is_subset(our_supported_features) {
-                       return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+                       return Err(ChannelError::close("Channel Type contains unsupported features".to_owned()));
                }
                let announced_channel = if (common_fields.channel_flags & 1) == 1 { true } else { false };
                if channel_type.requires_scid_privacy() && announced_channel {
-                       return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+                       return Err(ChannelError::close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
                }
                Ok(channel_type.clone())
        } else {
                let channel_type = ChannelTypeFeatures::from_init(&their_features);
                if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                       return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
                }
                Ok(channel_type)
        }
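
Throughout the patch, direct construction of the `ChannelError::Close(..)` variant is replaced with a lower-case `ChannelError::close(..)` constructor while the call sites stay otherwise identical. One plausible motivation, shown as a hedged sketch rather than the actual definition, is that a constructor lets the `Close` payload grow later (for example, to carry a structured closure reason) without touching every `return Err(...)` site:

```rust
// Sketch of the constructor-over-variant pattern this patch adopts; the real
// `ChannelError` has more variants and may attach more data than shown here.
enum ChannelError {
	Ignore(String),
	Close(String),
}

impl ChannelError {
	fn close(msg: String) -> Self {
		// Centralizing construction means the payload of `Close` can change shape
		// without editing every call site that produces a close-worthy error.
		ChannelError::Close(msg)
	}
}
```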
@@ -7778,7 +7806,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                          F::Target: FeeEstimator,
                          L::Target: Logger,
        {
-               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
+               let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None);
 
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
@@ -7833,7 +7861,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                ) {
                        panic!("Tried to send accept_channel after channel had moved forward");
                }
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Tried to send an accept_channel for a channel that has already advanced");
                }
 
@@ -7846,7 +7874,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        ///
        /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
        fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
-               let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               let first_per_commitment_point = self.context.holder_commitment_point.current_point();
                let keys = self.context.get_holder_pubkeys();
 
                msgs::AcceptChannel {
@@ -7888,8 +7917,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
                let funding_script = self.context.get_funding_redeemscript();
 
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+               let keys = self.context.build_holder_transaction_keys();
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger).tx;
                let trusted_tx = initial_commitment_tx.trust();
                let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -7910,7 +7939,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                L::Target: Logger
        {
                if self.context.is_outbound() {
-                       return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned())));
                }
                if !matches!(
                        self.context.channel_state, ChannelState::NegotiatingFunding(flags)
@@ -7919,11 +7948,11 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
                        // remember the channel, so it's safe to just send an error_message here and drop the
                        // channel.
-                       return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned())));
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7955,7 +7984,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                );
 
                if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
-                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+                       return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
                }
 
                // Now that we're past error-generating stuff, update our local state:
@@ -7963,12 +7992,12 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
                self.context.cur_counterparty_commitment_transaction_number -= 1;
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
 
                let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
 
                let funding_redeemscript = self.context.get_funding_redeemscript();
-               let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+               let funding_txo_script = funding_redeemscript.to_p2wsh();
                let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
                let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
                let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
@@ -7994,10 +8023,10 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                // `ChannelMonitor`.
                let mut channel = Channel {
                        context: self.context,
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        dual_funding_channel_context: None,
                };
-               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
                Ok((channel, funding_signed, channel_monitor))
@@ -8005,15 +8034,15 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 }
 
 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
        pub context: ChannelContext<SP>,
        pub unfunded_context: UnfundedChannelContext,
-       #[cfg(dual_funding)]
+       #[cfg(any(dual_funding, splicing))]
        pub dual_funding_context: DualFundingChannelContext,
 }
 
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
        pub fn new<ES: Deref, F: Deref>(
                fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
@@ -8084,15 +8113,15 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
                        debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
                }
 
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
                }
 
                let first_per_commitment_point = self.context.holder_signer.as_ref()
-                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
+                       .get_per_commitment_point(self.context.holder_commitment_point.transaction_number(),
                                &self.context.secp_ctx);
                let second_per_commitment_point = self.context.holder_signer.as_ref()
-                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
+                       .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1,
                                &self.context.secp_ctx);
                let keys = self.context.get_holder_pubkeys();
 
@@ -8129,14 +8158,14 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 }
 
 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
        pub context: ChannelContext<SP>,
        pub unfunded_context: UnfundedChannelContext,
        pub dual_funding_context: DualFundingChannelContext,
 }
 
-#[cfg(dual_funding)]
+#[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
        /// Creates a new dual-funded channel from a remote side's request for one.
        /// Assumes chain_hash has already been checked and corresponds with what we expect!
@@ -8159,7 +8188,7 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
                if msg.common_fields.channel_type.is_none() {
-                       return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
+                       return Err(ChannelError::close(format!("Rejecting V2 channel {} missing channel_type",
                                msg.common_fields.temporary_channel_id)))
                }
                let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
@@ -8226,7 +8255,7 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
                ) {
                        debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
                }
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
                }
 
@@ -8240,9 +8269,9 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
        /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
        fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
                let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-                       self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+                       self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx);
                let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-                       self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
+                       self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx);
                let keys = self.context.get_holder_pubkeys();
 
                msgs::AcceptChannelV2 {
@@ -8415,7 +8444,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                }
                self.context.destination_script.write(writer)?;
 
-               self.context.cur_holder_commitment_transaction_number.write(writer)?;
+               self.context.holder_commitment_point.transaction_number().write(writer)?;
                self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
                self.context.value_to_self_msat.write(writer)?;
 
@@ -8689,6 +8718,10 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
                }
 
+               // `current_point` will become optional when async signing is implemented.
+               let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point());
+               let next_holder_commitment_point = self.context.holder_commitment_point.next_point();
+
                write_tlv_fields!(writer, {
                        (0, self.context.announcement_sigs, option),
                        // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -8725,7 +8758,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       // 45 and 47 are reserved for async signing
+                       (45, cur_holder_commitment_point, option),
+                       (47, next_holder_commitment_point, option),
                        (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
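
TLV keys 45 and 47, previously reserved for async signing, now persist the current and next holder commitment points. Both keys are odd, and LDK's TLV reader skips unknown odd types while rejecting unknown even ones, so older readers should still accept channels serialized with these fields, and the read side below must tolerate their absence for channels written before this change. A simplified, self-contained illustration of that odd/even rule (a model of the behaviour, not LDK's actual `read_tlv_fields!` internals):

```rust
// "It's OK to be odd": unknown odd TLV types are skipped, unknown even types are an error.
fn read_tlv_stream(records: &[(u64, Vec<u8>)], known: &[u64]) -> Result<(), String> {
	for (typ, _value) in records {
		if known.contains(typ) {
			// ...decode into the matching field...
		} else if typ % 2 == 0 {
			return Err(format!("unknown required (even) TLV type {}", typ));
		}
		// Unknown odd types fall through and are ignored, so a reader that only knows
		// types up to 43 still accepts a stream containing 45 and 47.
	}
	Ok(())
}
```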
 
@@ -9036,6 +9070,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
                let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
 
+               let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
+               let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -9066,7 +9103,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       // 45 and 47 are reserved for async signing
+                       (45, cur_holder_commitment_point_opt, option),
+                       (47, next_holder_commitment_point_opt, option),
                        (49, local_initiated_shutdown, option),
                });
 
@@ -9178,6 +9216,24 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        }
                }
 
+               // If we're restoring this channel for the first time after an upgrade, then we require that the
+               // signer be available so that we can immediately populate the current commitment point. Channel
+               // restoration will fail if this is not possible.
+               let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) {
+                       (Some(current), Some(next)) => HolderCommitmentPoint::Available {
+                               transaction_number: cur_holder_commitment_transaction_number, current, next
+                       },
+                       (Some(current), _) => HolderCommitmentPoint::Available {
+                               transaction_number: cur_holder_commitment_transaction_number, current,
+                               next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
+                       },
+                       (_, _) => HolderCommitmentPoint::Available {
+                               transaction_number: cur_holder_commitment_transaction_number,
+                               current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx),
+                               next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
+                       },
+               };
+
                Ok(Channel {
                        context: ChannelContext {
                                user_id,
@@ -9203,7 +9259,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                shutdown_scriptpubkey,
                                destination_script,
 
-                               cur_holder_commitment_transaction_number,
+                               holder_commitment_point,
                                cur_counterparty_commitment_transaction_number,
                                value_to_self_msat,
 
@@ -9303,7 +9359,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                                blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        },
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        dual_funding_channel_context: None,
                })
        }
@@ -9312,13 +9368,14 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 #[cfg(test)]
 mod tests {
        use std::cmp;
+       use bitcoin::amount::Amount;
        use bitcoin::blockdata::constants::ChainHash;
        use bitcoin::blockdata::script::{ScriptBuf, Builder};
-       use bitcoin::blockdata::transaction::{Transaction, TxOut};
+       use bitcoin::blockdata::transaction::{Transaction, TxOut, Version};
        use bitcoin::blockdata::opcodes;
-       use bitcoin::network::constants::Network;
+       use bitcoin::network::Network;
        use crate::ln::onion_utils::INVALID_ONION_BLINDING;
-       use crate::ln::{PaymentHash, PaymentPreimage};
+       use crate::ln::types::{PaymentHash, PaymentPreimage};
        use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
        use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
        use crate::ln::channel::InitFeatures;
@@ -9345,9 +9402,8 @@ mod tests {
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::hex::FromHex;
-       use bitcoin::hash_types::WPubkeyHash;
        use bitcoin::blockdata::locktime::absolute::LockTime;
-       use bitcoin::address::{WitnessProgram, WitnessVersion};
+       use bitcoin::{WitnessProgram, WitnessVersion, WPubkeyHash};
        use crate::prelude::*;
 
        #[test]
@@ -9503,8 +9559,8 @@ mod tests {
 
                // Node A --> Node B: funding created
                let output_script = node_a_chan.context.get_funding_redeemscript();
-               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-                       value: 10000000, script_pubkey: output_script.clone(),
+               let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
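
The test fixups here track the rust-bitcoin 0.32 API: `Transaction::version` is now the `Version` newtype and `TxOut::value` is an `Amount` rather than a bare `u64`. A standalone example of building the same one-output funding transaction with the new types (assuming the `bitcoin` 0.32 crate):

```rust
use bitcoin::amount::Amount;
use bitcoin::blockdata::locktime::absolute::LockTime;
use bitcoin::blockdata::script::ScriptBuf;
use bitcoin::blockdata::transaction::{Transaction, TxOut, Version};

// Build the funding transaction the way the updated tests do: version and value take the
// `Version` and `Amount` newtypes instead of raw integers.
fn funding_tx(output_script: ScriptBuf) -> Transaction {
	Transaction {
		version: Version::ONE,
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![TxOut { value: Amount::from_sat(10_000_000), script_pubkey: output_script }],
	}
}
```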
@@ -9632,8 +9688,8 @@ mod tests {
 
                // Node A --> Node B: funding created
                let output_script = node_a_chan.context.get_funding_redeemscript();
-               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-                       value: 10000000, script_pubkey: output_script.clone(),
+               let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
@@ -9821,8 +9877,8 @@ mod tests {
 
                // Node A --> Node B: funding created
                let output_script = node_a_chan.context.get_funding_redeemscript();
-               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-                       value: 10000000, script_pubkey: output_script.clone(),
+               let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
@@ -9888,8 +9944,8 @@ mod tests {
                        &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
                ).unwrap();
                outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
-               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-                       value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+               let tx = Transaction { version: Version::ONE, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: Amount::from_sat(10000000), script_pubkey: outbound_chan.context.get_funding_redeemscript(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
@@ -10146,7 +10202,7 @@ mod tests {
                                                &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
                                        let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
-                                       let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
+                                       let htlc_sighash = Message::from_digest(sighash::SighashCache::new(&htlc_tx).p2wsh_signature_hash(0, &htlc_redeemscript, htlc.to_bitcoin_amount(), htlc_sighashtype).unwrap().as_raw_hash().to_byte_array());
                                        assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
 
                                        let mut preimage: Option<PaymentPreimage> = None;
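
The sighash computation likewise moves from the removed `segwit_signature_hash` / `Message::from_slice` pair to `p2wsh_signature_hash`, which takes the input value as an `Amount`, and `Message::from_digest`, which consumes the 32-byte digest directly. A standalone sketch of the same verification flow against the 0.32 API (everything other than the library calls is a caller-supplied placeholder):

```rust
use bitcoin::amount::Amount;
use bitcoin::blockdata::script::Script;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::hashes::Hash;
use bitcoin::secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};
use bitcoin::sighash::{EcdsaSighashType, SighashCache};

// Verify a counterparty signature over a P2WSH input using the rust-bitcoin 0.32 sighash
// API, mirroring the updated test helper above.
fn verify_htlc_sig(
	tx: &Transaction, redeemscript: &Script, value_sat: u64, sig: &Signature, pubkey: &PublicKey,
) -> bool {
	let mut cache = SighashCache::new(tx);
	let sighash = cache
		.p2wsh_signature_hash(0, redeemscript, Amount::from_sat(value_sat), EcdsaSighashType::All)
		.expect("input 0 exists");
	let msg = Message::from_digest(sighash.to_byte_array());
	Secp256k1::verification_only().verify_ecdsa(&msg, sig, pubkey).is_ok()
}
```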
@@ -11012,15 +11068,15 @@ mod tests {
                // Fund the channel with a batch funding transaction.
                let output_script = node_a_chan.context.get_funding_redeemscript();
                let tx = Transaction {
-                       version: 1,
+                       version: Version::ONE,
                        lock_time: LockTime::ZERO,
                        input: Vec::new(),
                        output: vec![
                                TxOut {
-                                       value: 10000000, script_pubkey: output_script.clone(),
+                                       value: Amount::from_sat(10000000), script_pubkey: output_script.clone(),
                                },
                                TxOut {
-                                       value: 10000000, script_pubkey: Builder::new().into_script(),
+                                       value: Amount::from_sat(10000000), script_pubkey: Builder::new().into_script(),
                                },
                        ]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
@@ -11077,6 +11133,6 @@ mod tests {
                // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
                node_a_chan.set_batch_ready();
                assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
-               assert!(node_a_chan.check_get_channel_ready(0).is_some());
+               assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());
        }
 }