Merge pull request #3152 from alecchendev/2024-06-async-commit-secret-raa
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 716eba7cf509e2ab79618a0aad0fb2ede36a458b..46e39ffd5d65b0c43250becd73c074d5fecc2494 100644
@@ -130,7 +130,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCResolution,
        },
        (2, Pending) => {
                (0, update_add_htlc, required),
-       };
+       },
 );
 
 enum InboundHTLCState {
@@ -710,7 +710,7 @@ pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
 pub(super) enum ChannelError {
        Ignore(String),
        Warn(String),
-       Close(String),
+       Close((String, ClosureReason)),
 }
 
 impl fmt::Debug for ChannelError {
@@ -718,7 +718,7 @@ impl fmt::Debug for ChannelError {
                match self {
                        &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
                        &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
-                       &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+                       &ChannelError::Close((ref e, _)) => write!(f, "Close : {}", e),
                }
        }
 }
@@ -728,11 +728,17 @@ impl fmt::Display for ChannelError {
                match self {
                        &ChannelError::Ignore(ref e) => write!(f, "{}", e),
                        &ChannelError::Warn(ref e) => write!(f, "{}", e),
-                       &ChannelError::Close(ref e) => write!(f, "{}", e),
+                       &ChannelError::Close((ref e, _)) => write!(f, "{}", e),
                }
        }
 }
 
+impl ChannelError {
+       pub(super) fn close(err: String) -> Self {
+               ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err }))
+       }
+}
+
 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
        pub logger: &'a L,
        pub peer_id: Option<PublicKey>,
@@ -767,7 +773,7 @@ macro_rules! secp_check {
        ($res: expr, $err: expr) => {
                match $res {
                        Ok(thing) => thing,
-                       Err(_) => return Err(ChannelError::Close($err)),
+                       Err(_) => return Err(ChannelError::close($err)),
                }
        };
 }
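// A minimal sketch of the error-type change these hunks introduce, using
// simplified stand-in types rather than LDK's real ones: `Close` now pairs the
// human-readable message (sent to the peer) with a structured `ClosureReason`
// (surfaced to the user when the channel is closed), and `close()` covers the
// common case where only a message is available.
#[derive(Debug)]
enum ClosureReason {
    ProcessingError { err: String },
}

enum ChannelError {
    Ignore(String),
    Warn(String),
    Close((String, ClosureReason)),
}

impl ChannelError {
    // Most validation sites only have a message, so the helper derives the
    // reason by wrapping that same message in `ProcessingError`.
    fn close(err: String) -> Self {
        ChannelError::Close((err.clone(), ClosureReason::ProcessingError { err }))
    }
}

// Callers can now recover the structured reason instead of re-parsing strings:
fn closure_reason(err: ChannelError) -> Option<ClosureReason> {
    match err {
        ChannelError::Close((_msg, reason)) => Some(reason),
        ChannelError::Ignore(_) | ChannelError::Warn(_) => None,
    }
}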
@@ -900,8 +906,10 @@ pub(super) struct MonitorRestoreUpdates {
 #[allow(unused)]
 pub(super) struct SignerResumeUpdates {
        pub commitment_update: Option<msgs::CommitmentUpdate>,
+       pub revoke_and_ack: Option<msgs::RevokeAndACK>,
        pub funding_signed: Option<msgs::FundingSigned>,
        pub channel_ready: Option<msgs::ChannelReady>,
+       pub order: RAACommitmentOrder,
 }
 
 /// The return value of `channel_reestablish`
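// A sketch of how the two new `SignerResumeUpdates` fields are meant to
// interact, with stand-in types (the real `RAACommitmentOrder` lives in
// `channelmanager`): `order` dictates which of the two optional messages must go
// on the wire first once the signer unblocks. The producer is responsible for
// leaving the later message `None` while the earlier one is still pending on
// the signer.
#[derive(Clone, Copy)]
enum RAACommitmentOrder {
    CommitmentFirst,
    RevokeAndACKFirst,
}

struct RevokeAndACK;     // stand-in for msgs::RevokeAndACK
struct CommitmentUpdate; // stand-in for msgs::CommitmentUpdate

struct Resumed {
    commitment_update: Option<CommitmentUpdate>,
    revoke_and_ack: Option<RevokeAndACK>,
    order: RAACommitmentOrder,
}

enum Msg {
    RAA(RevokeAndACK),
    Commitment(CommitmentUpdate),
}

// Queue whatever is available, in the order the protocol requires.
fn enqueue(resumed: Resumed, queue: &mut Vec<Msg>) {
    let Resumed { commitment_update, revoke_and_ack, order } = resumed;
    let (first, second) = match order {
        RAACommitmentOrder::RevokeAndACKFirst =>
            (revoke_and_ack.map(Msg::RAA), commitment_update.map(Msg::Commitment)),
        RAACommitmentOrder::CommitmentFirst =>
            (commitment_update.map(Msg::Commitment), revoke_and_ack.map(Msg::RAA)),
    };
    queue.extend(first.into_iter().chain(second));
}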
@@ -933,6 +941,111 @@ pub(crate) struct ShutdownResult {
        pub(crate) channel_funding_txo: Option<OutPoint>,
 }
 
+/// Tracks the transaction number, along with current and next commitment points.
+/// This consolidates the logic to advance our commitment number and request new
+/// commitment points from our signer.
+#[derive(Debug, Copy, Clone)]
+enum HolderCommitmentPoint {
+       // TODO: add a variant for before our first commitment point is retrieved
+       /// We've advanced our commitment number and are waiting on the next commitment point.
+       /// Until the `get_per_commitment_point` signer method becomes async, this variant
+       /// will not be used.
+       PendingNext { transaction_number: u64, current: PublicKey },
+       /// Our current commitment point is ready, we've cached our next point,
+       /// and we are not pending a new one.
+       Available { transaction_number: u64, current: PublicKey, next: PublicKey },
+}
+
+impl HolderCommitmentPoint {
+       pub fn new<SP: Deref>(signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>) -> Self
+               where SP::Target: SignerProvider
+       {
+               HolderCommitmentPoint::Available {
+                       transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       // TODO(async_signing): remove this expect with the Uninitialized variant
+                       current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx)
+                               .expect("Signer must be able to provide initial commitment point"),
+                       // TODO(async_signing): remove this expect with the Uninitialized variant
+                       next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx)
+                               .expect("Signer must be able to provide second commitment point"),
+               }
+       }
+
+       pub fn is_available(&self) -> bool {
+               matches!(self, HolderCommitmentPoint::Available { .. })
+       }
+
+       pub fn transaction_number(&self) -> u64 {
+               match self {
+                       HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number,
+                       HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number,
+               }
+       }
+
+       pub fn current_point(&self) -> PublicKey {
+               match self {
+                       HolderCommitmentPoint::PendingNext { current, .. } => *current,
+                       HolderCommitmentPoint::Available { current, .. } => *current,
+               }
+       }
+
+       pub fn next_point(&self) -> Option<PublicKey> {
+               match self {
+                       HolderCommitmentPoint::PendingNext { .. } => None,
+                       HolderCommitmentPoint::Available { next, .. } => Some(*next),
+               }
+       }
+
+       /// If we are pending the next commitment point, this method tries asking the signer again,
+       /// and transitions to the next state if successful.
+       ///
+       /// This method is used for the following transitions:
+       /// - `PendingNext` -> `Available`
+       pub fn try_resolve_pending<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
+               where SP::Target: SignerProvider, L::Target: Logger
+       {
+               if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
+                       if let Ok(next) = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx) {
+                               log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1);
+                               *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
+                       } else {
+                               log_trace!(logger, "Next per-commitment point {} is pending", transaction_number);
+                       }
+               }
+       }
+
+       /// If we are not pending the next commitment point, this method advances the commitment number
+       /// and requests the next commitment point from the signer. Returns `Ok` if we were at
+       /// `Available` and were able to advance our commitment number (even if we are still pending
+       /// the next commitment point).
+       ///
+       /// If our signer is not ready to provide the next commitment point, we will
+       /// only advance to `PendingNext`, and the pending point should be retried
+       /// later, from `signer_unblocked`, via `try_resolve_pending`.
+       ///
+       /// If our signer is ready to provide the next commitment point, we will advance all the
+       /// way to `Available`.
+       ///
+       /// This method is used for the following transitions:
+       /// - `Available` -> `PendingNext`
+       /// - `Available` -> `PendingNext` -> `Available` (in one fell swoop)
+       pub fn advance<SP: Deref, L: Deref>(
+               &mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L
+       ) -> Result<(), ()>
+               where SP::Target: SignerProvider, L::Target: Logger
+       {
+               if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self {
+                       *self = HolderCommitmentPoint::PendingNext {
+                               transaction_number: *transaction_number - 1,
+                               current: *next,
+                       };
+                       self.try_resolve_pending(signer, secp_ctx, logger);
+                       return Ok(());
+               }
+               Err(())
+       }
+}
+
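// A self-contained model of the two-state machine above, with a fallible closure
// standing in for the signer's `get_per_commitment_point` (names and numbers
// below are illustrative only). `advance` always succeeds from `Available`;
// whether it lands back in `Available` or parks in `PendingNext` depends on
// whether the signer can produce the next point yet.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Point {
    PendingNext { n: u64, current: u64 },
    Available { n: u64, current: u64, next: u64 },
}

impl Point {
    fn advance(&mut self, get_point: impl Fn(u64) -> Result<u64, ()>) -> Result<(), ()> {
        if let Point::Available { n, next, .. } = *self {
            // Commitment numbers count down, so "advancing" decrements.
            *self = Point::PendingNext { n: n - 1, current: next };
            self.try_resolve_pending(get_point);
            return Ok(());
        }
        Err(()) // still pending: cannot advance again without the next point
    }

    fn try_resolve_pending(&mut self, get_point: impl Fn(u64) -> Result<u64, ()>) {
        if let Point::PendingNext { n, current } = *self {
            if let Ok(next) = get_point(n - 1) {
                *self = Point::Available { n, current, next };
            }
        }
    }
}

fn main() {
    let mut p = Point::Available { n: 10, current: 100, next: 99 };
    // Signer temporarily unavailable: we advance, but only as far as PendingNext.
    assert!(p.advance(|_| Err(())).is_ok());
    assert_eq!(p, Point::PendingNext { n: 9, current: 99 });
    // A second advance must fail until the pending point resolves.
    assert!(p.advance(|_| Err(())).is_err());
    // `signer_unblocked` later retries, and we become Available again.
    p.try_resolve_pending(|i| Ok(i * 10));
    assert_eq!(p, Point::Available { n: 9, current: 99, next: 80 });
}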
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
@@ -1111,7 +1224,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        // generation start at 0 and count up...this simplifies some parts of implementation at the
        // cost of others, but should really just be changed.
 
-       cur_holder_commitment_transaction_number: u64,
+       holder_commitment_point: HolderCommitmentPoint,
        cur_counterparty_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
@@ -1140,6 +1253,14 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,
        monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 
+       /// If we went to send a revoke_and_ack but our signer was unable to give us a signature,
+       /// we should retry at some point in the future when the signer indicates it may have a
+       /// signature for us.
+       ///
+       /// This may also be used to make sure we send a `revoke_and_ack` after a `commitment_signed`
+       /// if we need to maintain ordering of messages, but are pending the signer on a previous
+       /// message.
+       signer_pending_revoke_and_ack: bool,
        /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
        /// but our signer (initially) refused to give us a signature, we should retry at some point in
        /// the future when the signer indicates it may have a signature for us.
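// A sketch of the retry discipline behind these `signer_pending_*` flags, with
// stand-in types and a hypothetical `sign` callback (this is not LDK's actual
// control flow): a set flag records "we owe the peer this message but the
// signer could not produce it yet", and the `signer_unblocked` path retries and
// clears it on success.
struct Raa; // stand-in for msgs::RevokeAndACK

struct Ctx {
    signer_pending_revoke_and_ack: bool,
}

impl Ctx {
    // Called wherever the protocol requires a revoke_and_ack. On signer failure
    // we record the obligation instead of failing the channel.
    fn maybe_get_raa(&mut self, sign: impl Fn() -> Result<Raa, ()>) -> Option<Raa> {
        match sign() {
            Ok(raa) => {
                self.signer_pending_revoke_and_ack = false;
                Some(raa)
            }
            Err(()) => {
                self.signer_pending_revoke_and_ack = true;
                None
            }
        }
    }

    // Called when the signer signals readiness: retry only if still owed.
    fn signer_unblocked(&mut self, sign: impl Fn() -> Result<Raa, ()>) -> Option<Raa> {
        if self.signer_pending_revoke_and_ack { self.maybe_get_raa(sign) } else { None }
    }
}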
@@ -1398,90 +1519,90 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let pubkeys = holder_signer.pubkeys().clone();
 
                if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+                       return Err(ChannelError::close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
                }
 
                // Check sanity of message fields:
                if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
                                config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
                                open_channel_fields.funding_satoshis, our_funding_satoshis)));
                }
                if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
-                       return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
                }
                if msg_channel_reserve_satoshis > channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
                }
                let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
                if msg_push_msat > full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
+                       return Err(ChannelError::close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
                }
                if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
                }
                if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
+                       return Err(ChannelError::close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
                }
                Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
 
                let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
                if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
+                       return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
                }
                if open_channel_fields.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+                       return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
                }
                if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
+                       return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
                }
 
                // Now check against optional parameters as set by config...
                if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
-                       return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+                       return Err(ChannelError::close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
                }
                if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+                       return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
                }
                if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+                       return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
                }
                if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+                       return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
                }
                if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+                       return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
                }
                if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if open_channel_fields.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
 
                // Convert things into internal flags and prep our state:
 
                if config.channel_handshake_limits.force_announced_channel_preference {
                        if config.channel_handshake_config.announced_channel != announced_channel {
-                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+                               return Err(ChannelError::close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
                        }
                }
 
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        // Protocol level safety check in place, although it should never happen because
                        // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+                       return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
                if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
+                       return Err(ChannelError::close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
                }
                if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
                                msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
                }
                if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+                       return Err(ChannelError::close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
                }
 
                // check if the funder's amount for the initial commitment tx is sufficient
@@ -1494,14 +1615,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
                let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
                if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
-                       return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
+                       return Err(ChannelError::close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
                }
 
                let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
                // While it's reasonable for us to not meet the channel reserve initially (if they don't
                // want to push much to us), our counterparty should always have more than our reserve.
                if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
+                       return Err(ChannelError::close("Insufficient funding amount for initial reserve".to_owned()));
                }
 
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@ -1512,14 +1633,14 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                                None
                                        } else {
                                                if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+                                                       return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
                                                }
                                                Some(script.clone())
                                        }
                                },
                                // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel
                                &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+                                       return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
                } else { None };
@@ -1527,19 +1648,19 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
                        match signer_provider.get_shutdown_scriptpubkey() {
                                Ok(scriptpubkey) => Some(scriptpubkey),
-                               Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+                               Err(_) => return Err(ChannelError::close("Failed to get upfront shutdown scriptpubkey".to_owned())),
                        }
                } else { None };
 
                if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
                        if !shutdown_scriptpubkey.is_compatible(&their_features) {
-                               return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+                               return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
                        }
                }
 
                let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
                        Ok(script) => script,
-                       Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+                       Err(_) => return Err(ChannelError::close("Failed to get destination script".to_owned())),
                };
 
                let mut secp_ctx = Secp256k1::new();
@@ -1553,6 +1674,9 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
 
+               let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+               let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
                // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
 
                let channel_context = ChannelContext {
@@ -1578,11 +1702,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                        latest_monitor_update_id: 0,
 
-                       holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+                       holder_signer,
                        shutdown_scriptpubkey,
                        destination_script,
 
-                       cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       holder_commitment_point,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat,
 
@@ -1605,6 +1729,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        monitor_pending_finalized_fulfills: Vec::new(),
                        monitor_pending_update_adds: Vec::new(),
 
+                       signer_pending_revoke_and_ack: false,
                        signer_pending_commitment_update: false,
                        signer_pending_funding: false,
 
@@ -1696,7 +1821,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                Ok(channel_context)
        }
 
-       fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
+       fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
                fee_estimator: &'a LowerBoundedFeeEstimator<F>,
                entropy_source: &'a ES,
                signer_provider: &'a SP,
@@ -1713,11 +1838,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                channel_keys_id: [u8; 32],
                holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
                pubkeys: ChannelPublicKeys,
+               _logger: L,
        ) -> Result<ChannelContext<SP>, APIError>
                where
                        ES::Target: EntropySource,
                        F::Target: FeeEstimator,
                        SP::Target: SignerProvider,
+                       L::Target: Logger,
        {
                // This will be updated with the counterparty contribution if this is a dual-funded channel
                let channel_value_satoshis = funding_satoshis;
@@ -1777,6 +1904,9 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
 
+               let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
+               let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
+
                Ok(Self {
                        user_id,
 
@@ -1800,11 +1930,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
                        latest_monitor_update_id: 0,
 
-                       holder_signer: ChannelSignerType::Ecdsa(holder_signer),
+                       holder_signer,
                        shutdown_scriptpubkey,
                        destination_script,
 
-                       cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+                       holder_commitment_point,
                        cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat,
 
@@ -1827,6 +1957,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        monitor_pending_finalized_fulfills: Vec::new(),
                        monitor_pending_update_adds: Vec::new(),
 
+                       signer_pending_revoke_and_ack: false,
                        signer_pending_commitment_update: false,
                        signer_pending_funding: false,
 
@@ -2037,8 +2168,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 
        /// Returns a mutable reference to the holder signer for this channel.
        #[cfg(test)]
-       pub fn get_signer(&self) -> &ChannelSignerType<SP> {
-               return &self.holder_signer
+       pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+               return &mut self.holder_signer
        }
 
        /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
@@ -2065,6 +2196,143 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                }
        }
 
+       /// Performs checks against necessary constraints after receiving either an `accept_channel` or
+       /// `accept_channel2` message.
+       pub fn do_accept_channel_checks(
+               &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+               common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+       ) -> Result<(), ChannelError> {
+               let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
+               // Check sanity of message fields:
+               if !self.is_outbound() {
+                       return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+               }
+               if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+                       return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+               }
+               if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+                       return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+               }
+               if channel_reserve_satoshis > self.channel_value_satoshis {
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+               }
+               if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+               }
+               if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+                               channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+               }
+               let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+               if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+               }
+               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if common_fields.to_self_delay > max_delay_acceptable {
+                       return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+               }
+               if common_fields.max_accepted_htlcs < 1 {
+                       return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+               }
+               if common_fields.max_accepted_htlcs > MAX_HTLCS {
+                       return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+               }
+
+               // Now check against optional parameters as set by config...
+               if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+                       return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+               }
+               if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+               }
+               if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+                       return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+               }
+               if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+                       return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+               }
+               if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+                       return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+               }
+               if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+                       return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+               }
+
+               if let Some(ty) = &common_fields.channel_type {
+                       if *ty != self.channel_type {
+                               return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+                       }
+               } else if their_features.supports_channel_type() {
+                       // Assume they've accepted the channel type as they said they understand it.
+               } else {
+                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
+                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+                               return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       }
+                       self.channel_type = channel_type.clone();
+                       self.channel_transaction_parameters.channel_type_features = channel_type;
+               }
+
+               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+                       match &common_fields.shutdown_scriptpubkey {
+                               &Some(ref script) => {
+                                       // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+                                       if script.len() == 0 {
+                                               None
+                                       } else {
+                                               if !script::is_bolt2_compliant(&script, their_features) {
+                                                       return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+                                               }
+                                               Some(script.clone())
+                                       }
+                               },
+                               // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel
+                               &None => {
+                                       return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+                               }
+                       }
+               } else { None };
+
+               self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+               self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+               self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+               self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+               self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+               if peer_limits.trust_own_funding_0conf {
+                       self.minimum_depth = Some(common_fields.minimum_depth);
+               } else {
+                       self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+               }
+
+               let counterparty_pubkeys = ChannelPublicKeys {
+                       funding_pubkey: common_fields.funding_pubkey,
+                       revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+                       payment_point: common_fields.payment_basepoint,
+                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+                       htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+               };
+
+               self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+                       selected_contest_delay: common_fields.to_self_delay,
+                       pubkeys: counterparty_pubkeys,
+               });
+
+               self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+               self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+               self.channel_state = ChannelState::NegotiatingFunding(
+                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+               );
+               self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+               Ok(())
+       }
+
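// Usage note: this method is shared by the `accept_channel` and
// `accept_channel2` handlers, both of which reduce their message to
// `CommonAcceptChannelFields` plus a reserve value. Roughly (a sketch, not the
// exact call sites in this diff):
//
//     self.context.do_accept_channel_checks(
//         default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis,
//     )?;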
        /// Returns the block hash in which our funding transaction was confirmed.
        pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
                self.funding_tx_confirmed_in
@@ -2498,8 +2766,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
        /// our counterparty!)
        /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
        /// TODO Some magic rust shit to compile-time check this?
-       fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
-               let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+       fn build_holder_transaction_keys(&self) -> TxCreationKeys {
+               let per_commitment_point = self.holder_commitment_point.current_point();
                let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
                let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
                let counterparty_pubkeys = self.get_counterparty_pubkeys();
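// The substantive change in this hunk: deriving the holder's transaction keys no
// longer calls into the signer synchronously. Abstractly (a sketch, not the
// exact surrounding code):
//
//     // before: every key derivation was a fallible, async-hostile signer call
//     let per_commitment_point =
//         signer.get_per_commitment_point(commitment_number, &secp_ctx);
//     // after: read the value HolderCommitmentPoint already cached; the signer
//     // is only consulted when the state machine advances
//     let per_commitment_point = self.holder_commitment_point.current_point();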
@@ -2550,7 +2818,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
                let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
-               cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+               cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
        }
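// An overflow-hardening fix rides along in this hunk: with a pathological
// `feerate_per_kw` near `u32::MAX`, the old `feerate_per_kw + 2530` would panic
// in debug builds and wrap in release. Extracted into a free function for
// illustration (hypothetical name, same arithmetic):
fn buffered_feerate(feerate_per_kw: u32) -> u32 {
    let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
    // saturating_add keeps the bound well-defined at the top of the range
    std::cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
}

#[test]
fn buffered_feerate_does_not_overflow() {
    assert_eq!(buffered_feerate(u32::MAX), u32::MAX); // would have panicked pre-fix
    assert_eq!(buffered_feerate(1000), 3530);         // 1000 + 2530 > 1000 * 1.25
}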
 
        /// Get forwarding information for the counterparty.
@@ -3506,7 +3774,12 @@ impl<SP: Deref> Channel<SP> where
                                        return Ok(());
                                }
                        }
-                       return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
+                       return Err(ChannelError::Close((format!(
+                               "Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit
+                       ), ClosureReason::PeerFeerateTooLow {
+                               peer_feerate_sat_per_kw: feerate_per_kw,
+                               required_feerate_sat_per_kw: lower_limit,
+                       })));
                }
                Ok(())
        }
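// Note the contrast with the mechanical `ChannelError::close(...)` conversions
// elsewhere in this diff: here a *structured* reason exists, so the tuple
// variant is built directly and the event consumer gets machine-readable
// feerates instead of a `ProcessingError` string. The resulting rule of thumb:
//
//     ChannelError::close(msg)           // generic failure; reason derived from msg
//     ChannelError::Close((msg, reason)) // a specific ClosureReason is available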
@@ -3956,7 +4229,7 @@ impl<SP: Deref> Channel<SP> where
                        }
                        // If we reconnected before sending our `channel_ready` they may still resend theirs.
                        ChannelState::ChannelReady(_) => check_reconnection = true,
-                       _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
+                       _ => return Err(ChannelError::close("Peer sent a channel_ready at a strange time".to_owned())),
                }
                if check_reconnection {
                        // They probably disconnected/reconnected and re-sent the channel_ready, which is
@@ -3979,7 +4252,7 @@ impl<SP: Deref> Channel<SP> where
                                                ).expect("We already advanced, so previous secret keys should have been validated already")))
                                };
                        if expected_point != Some(msg.next_per_commitment_point) {
-                               return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+                               return Err(ChannelError::close("Peer sent a reconnect channel_ready with a different point".to_owned()));
                        }
                        return Ok(None);
                }
@@ -3997,32 +4270,32 @@ impl<SP: Deref> Channel<SP> where
                fee_estimator: &LowerBoundedFeeEstimator<F>,
        ) -> Result<(), ChannelError> where F::Target: FeeEstimator {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
                if self.context.channel_state.is_remote_shutdown_sent() {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
                }
                if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+                       return Err(ChannelError::close("Remote side tried to send more than the total value of the channel".to_owned()));
                }
                if msg.amount_msat == 0 {
-                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+                       return Err(ChannelError::close("Remote side tried to send a 0-msat HTLC".to_owned()));
                }
                if msg.amount_msat < self.context.holder_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+                       return Err(ChannelError::close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
                }
 
                let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
                let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
                if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
-                       return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+                       return Err(ChannelError::close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
                }
                if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+                       return Err(ChannelError::close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
                }
 
                // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
@@ -4051,7 +4324,7 @@ impl<SP: Deref> Channel<SP> where
                let pending_remote_value_msat =
                        self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
                if pending_remote_value_msat < msg.amount_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+                       return Err(ChannelError::close("Remote HTLC add would overdraw remaining funds".to_owned()));
                }
 
                // Check that the remote can afford to pay for this HTLC on-chain at the current
@@ -4067,10 +4340,10 @@ impl<SP: Deref> Channel<SP> where
                                0
                        };
                        if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
-                               return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+                               return Err(ChannelError::close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
                        };
                        if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
-                               return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+                               return Err(ChannelError::close("Remote HTLC add would put them under remote reserve value".to_owned()));
                        }
                }
 
@@ -4084,14 +4357,14 @@ impl<SP: Deref> Channel<SP> where
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
                        let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                        if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
-                               return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+                               return Err(ChannelError::close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
                        }
                }
                if self.context.next_counterparty_htlc_id != msg.htlc_id {
-                       return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+                       return Err(ChannelError::close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
                }
                if msg.cltv_expiry >= 500000000 {
-                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+                       return Err(ChannelError::close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
                if self.context.channel_state.is_local_shutdown_sent() {
@@ -4125,32 +4398,32 @@ impl<SP: Deref> Channel<SP> where
                                        Some(payment_preimage) => {
                                                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
                                                if payment_hash != htlc.payment_hash {
-                                                       return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+                                                       return Err(ChannelError::close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
                                                }
                                                OutboundHTLCOutcome::Success(Some(payment_preimage))
                                        }
                                };
                                match htlc.state {
                                        OutboundHTLCState::LocalAnnounced(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+                                               return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
                                        OutboundHTLCState::Committed => {
                                                htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
                                        },
                                        OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
-                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+                                               return Err(ChannelError::close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
                                }
                                return Ok(htlc);
                        }
                }
-               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+               Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
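
For reference, the fulfill-path preimage check above boils down to a single hash comparison; a standalone sketch using the same bitcoin::hashes API this file already uses:

    use bitcoin::hashes::{sha256::Hash as Sha256, Hash};

    /// Returns true iff `preimage` hashes to `payment_hash`, mirroring the
    /// check in mark_outbound_htlc_removed above.
    fn preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
        Sha256::hash(&preimage[..]).to_byte_array() == *payment_hash
    }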
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
@@ -4158,10 +4431,10 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fail HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -4170,10 +4443,10 @@ impl<SP: Deref> Channel<SP> where
 
        pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -4184,20 +4457,20 @@ impl<SP: Deref> Channel<SP> where
                where L::Target: Logger
        {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got commitment signed message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+                       return Err(ChannelError::close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let funding_script = self.context.get_funding_redeemscript();
 
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+               let keys = self.context.build_holder_transaction_keys();
 
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+               let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger);
                let commitment_txid = {
                        let trusted_tx = commitment_stats.tx.trust();
                        let bitcoin_tx = trusted_tx.built_transaction();
@@ -4208,7 +4481,7 @@ impl<SP: Deref> Channel<SP> where
                                log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
                                log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
-                               return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+                               return Err(ChannelError::close("Invalid commitment tx signature from peer".to_owned()));
                        }
                        bitcoin_tx.txid
                };
@@ -4223,7 +4496,7 @@ impl<SP: Deref> Channel<SP> where
                        debug_assert!(!self.context.is_outbound());
                        let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
                        if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-                               return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+                               return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned()));
                        }
                }
                #[cfg(any(test, fuzzing))]
@@ -4245,7 +4518,7 @@ impl<SP: Deref> Channel<SP> where
                }
 
                if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-                       return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+                       return Err(ChannelError::close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
                }
 
                // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
@@ -4278,7 +4551,7 @@ impl<SP: Deref> Channel<SP> where
                                        log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
                                        encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
                                if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
-                                       return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+                                       return Err(ChannelError::close("Invalid HTLC tx signature from peer".to_owned()));
                                }
                                if !separate_nondust_htlc_sources {
                                        htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
@@ -4303,7 +4576,7 @@ impl<SP: Deref> Channel<SP> where
                );
 
                self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
-                       .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+                       .map_err(|_| ChannelError::close("Failed to validate our commitment".to_owned()))?;
 
                // Update state now that we've passed all the can-fail calls...
                let mut need_commitment = false;
@@ -4360,7 +4633,18 @@ impl<SP: Deref> Channel<SP> where
                        channel_id: Some(self.context.channel_id()),
                };
 
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+                       // We only fail to advance our commitment point/number if we're currently
+                       // waiting for our signer to unblock and provide a commitment point.
+                       // During post-funding channel operation, we only advance our point upon
+                       // receiving a commitment_signed, and our counterparty cannot send us
+                       // another commitment_signed until we've provided a new commitment point
+                       // in revoke_and_ack, which requires unblocking our signer and completing
+                       // the advance to the next point. This should be unreachable since
+                       // a new commitment_signed should fail at our signature checks above.
+                       debug_assert!(false, "We should be ready to advance our commitment point by the time we receive commitment_signed");
+                       return Err(ChannelError::close("Failed to advance our commitment point".to_owned()));
+               }
                self.context.expecting_peer_commitment_signed = false;
                // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
                // build_commitment_no_status_check() next which will reset this to RAAFirst.
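
For readers following the new HolderCommitmentPoint flow: below is a minimal, self-contained model of what an advance step like the one above could look like (u64 stands in for PublicKey; this is a sketch of the idea, not LDK's actual implementation). Advancing moves to a pending state immediately, and only becomes available again once the signer can produce the point for the following commitment number; commitment numbers count down.

    #[derive(Debug, Copy, Clone)]
    enum CommitmentPoint {
        Available { transaction_number: u64, current: u64, next: u64 },
        PendingNext { transaction_number: u64, current: u64 },
    }

    impl CommitmentPoint {
        fn advance<F: Fn(u64) -> Option<u64>>(&mut self, get_point: F) -> Result<(), ()> {
            if let CommitmentPoint::Available { transaction_number, next, .. } = *self {
                // The cached `next` becomes our current point for the new number.
                *self = CommitmentPoint::PendingNext {
                    transaction_number: transaction_number - 1, current: next,
                };
            }
            if let CommitmentPoint::PendingNext { transaction_number, current } = *self {
                // Ask the signer for the point after this one; None models an
                // async signer that is not ready yet.
                if let Some(next) = get_point(transaction_number - 1) {
                    *self = CommitmentPoint::Available { transaction_number, current, next };
                    return Ok(());
                }
            }
            Err(()) // still waiting on the signer; the caller treats this as pending
        }
    }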
@@ -4556,20 +4840,20 @@ impl<SP: Deref> Channel<SP> where
        where F::Target: FeeEstimator, L::Target: Logger,
        {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+                       return Err(ChannelError::close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+                       return Err(ChannelError::close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
                let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
 
                if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
                        if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
-                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+                               return Err(ChannelError::close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
                        }
                }
 
@@ -4581,7 +4865,7 @@ impl<SP: Deref> Channel<SP> where
                        // lot of work, and there's some chance this is all a misunderstanding anyway.
                        // We have to do *something*, though, since our signer may get mad at us for otherwise
                        // jumping a remote commitment number, so best to just force-close and move on.
-                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+                       return Err(ChannelError::close("Received an unexpected revoke_and_ack".to_owned()));
                }
 
                #[cfg(any(test, fuzzing))]
@@ -4595,7 +4879,7 @@ impl<SP: Deref> Channel<SP> where
                                ecdsa.validate_counterparty_revocation(
                                        self.context.cur_counterparty_commitment_transaction_number + 1,
                                        &secret
-                               ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+                               ).map_err(|_| ChannelError::close("Failed to validate revocation from peer".to_owned()))?;
                        },
                        // TODO (taproot|arik)
                        #[cfg(taproot)]
@@ -4603,7 +4887,7 @@ impl<SP: Deref> Channel<SP> where
                };
 
                self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
-                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+                       .map_err(|_| ChannelError::close("Previous secrets did not match new one".to_owned()))?;
                self.context.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.context.latest_monitor_update_id,
@@ -4871,8 +5155,8 @@ impl<SP: Deref> Channel<SP> where
                // Before proposing a feerate update, check that we can actually afford the new fee.
                let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
                let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate);
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
+               let keys = self.context.build_holder_transaction_keys();
+               let commitment_stats = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, true, logger);
                let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
                let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
                if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
@@ -5054,12 +5338,7 @@ impl<SP: Deref> Channel<SP> where
                        assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
                                "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
                        self.context.monitor_pending_channel_ready = false;
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-                       Some(msgs::ChannelReady {
-                               channel_id: self.context.channel_id(),
-                               next_per_commitment_point,
-                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                       })
+                       Some(self.get_channel_ready())
                } else { None };
 
                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
@@ -5083,12 +5362,23 @@ impl<SP: Deref> Channel<SP> where
                        };
                }
 
-               let raa = if self.context.monitor_pending_revoke_and_ack {
-                       Some(self.get_last_revoke_and_ack())
+               let mut raa = if self.context.monitor_pending_revoke_and_ack {
+                       self.get_last_revoke_and_ack(logger)
                } else { None };
-               let commitment_update = if self.context.monitor_pending_commitment_signed {
+               let mut commitment_update = if self.context.monitor_pending_commitment_signed {
                        self.get_last_commitment_update_for_send(logger).ok()
                } else { None };
+               if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+                       && self.context.signer_pending_commitment_update && raa.is_some() {
+                       self.context.signer_pending_revoke_and_ack = true;
+                       raa = None;
+               }
+               if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+                       && self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+                       self.context.signer_pending_commitment_update = true;
+                       commitment_update = None;
+               }
+
                if commitment_update.is_some() {
                        self.mark_awaiting_response();
                }
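
The two new if-blocks above implement a single rule; here it is as a standalone function for clarity (a sketch with a generic message type, not code from this patch): if the message that must be sent first is still blocked on the signer, withhold the other message and mark it signer-pending so the signer-unblocked path regenerates it in the right order.

    #[derive(PartialEq, Clone, Copy)]
    enum Order { CommitmentFirst, RevokeAndACKFirst }

    // Returns (raa, cs, signer_pending_raa, signer_pending_cs) after gating.
    fn apply_resend_gate<M>(
        order: Order, signer_pending_cs: bool, signer_pending_raa: bool,
        mut raa: Option<M>, mut cs: Option<M>,
    ) -> (Option<M>, Option<M>, bool, bool) {
        let mut pending_raa = signer_pending_raa;
        let mut pending_cs = signer_pending_cs;
        if order == Order::CommitmentFirst && signer_pending_cs && raa.is_some() {
            // The CS must go first but is blocked, so hold the RAA back too.
            pending_raa = true;
            raa = None;
        }
        if order == Order::RevokeAndACKFirst && signer_pending_raa && cs.is_some() {
            // The RAA must go first but is blocked, so hold the CS back too.
            pending_cs = true;
            cs = None;
        }
        (raa, cs, pending_raa, pending_cs)
    }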
@@ -5106,14 +5396,34 @@ impl<SP: Deref> Channel<SP> where
                }
        }
 
+       pub fn check_for_stale_feerate<L: Logger>(&mut self, logger: &L, min_feerate: u32) -> Result<(), ClosureReason> {
+               if self.context.is_outbound() {
+                       // While it's possible our fee is too low for an outbound channel because we've been
+                       // unable to increase the fee, we don't try to force-close directly here.
+                       return Ok(());
+               }
+               if self.context.feerate_per_kw < min_feerate {
+                       log_info!(logger,
+                               "Closing channel as feerate of {} is below required {} (the minimum required rate over the past day)",
+                               self.context.feerate_per_kw, min_feerate
+                       );
+                       Err(ClosureReason::PeerFeerateTooLow {
+                               peer_feerate_sat_per_kw: self.context.feerate_per_kw,
+                               required_feerate_sat_per_kw: min_feerate,
+                       })
+               } else {
+                       Ok(())
+               }
+       }
+
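
The new staleness check in isolation, as a self-contained sketch (simplified types, not LDK's): inbound channels are flagged for closure when the counterparty-controlled feerate falls below the rolling minimum, while outbound channels are skipped because we can raise our own fee instead.

    struct ChanView { is_outbound: bool, feerate_per_kw: u32 }

    #[derive(Debug, PartialEq)]
    enum Closure {
        PeerFeerateTooLow { peer_feerate_sat_per_kw: u32, required_feerate_sat_per_kw: u32 },
    }

    fn check_for_stale_feerate(c: &ChanView, min_feerate: u32) -> Result<(), Closure> {
        if c.is_outbound {
            // We control this fee; try to bump it rather than force-close.
            return Ok(());
        }
        if c.feerate_per_kw < min_feerate {
            Err(Closure::PeerFeerateTooLow {
                peer_feerate_sat_per_kw: c.feerate_per_kw,
                required_feerate_sat_per_kw: min_feerate,
            })
        } else {
            Ok(())
        }
    }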
        pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
                where F::Target: FeeEstimator, L::Target: Logger
        {
                if self.context.is_outbound() {
-                       return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+                       return Err(ChannelError::close("Non-funding remote tried to update channel fee".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
                Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
 
@@ -5124,11 +5434,11 @@ impl<SP: Deref> Channel<SP> where
                let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
                let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
                if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
-                       return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+                       return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
                                msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
                }
                if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
-                       return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+                       return Err(ChannelError::close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
                                msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
                }
                Ok(())
@@ -5138,37 +5448,94 @@ impl<SP: Deref> Channel<SP> where
        /// blocked.
        #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
-               let commitment_update = if self.context.signer_pending_commitment_update {
-                       self.get_last_commitment_update_for_send(logger).ok()
-               } else { None };
+               if !self.context.holder_commitment_point.is_available() {
+                       log_trace!(logger, "Attempting to update holder per-commitment point...");
+                       self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
+               }
                let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
                        self.context.get_funding_signed_msg(logger).1
                } else { None };
                let channel_ready = if funding_signed.is_some() {
-                       self.check_get_channel_ready(0)
+                       self.check_get_channel_ready(0, logger)
+               } else { None };
+
+               let mut commitment_update = if self.context.signer_pending_commitment_update {
+                       log_trace!(logger, "Attempting to generate pending commitment update...");
+                       self.get_last_commitment_update_for_send(logger).ok()
+               } else { None };
+               let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack {
+                       log_trace!(logger, "Attempting to generate pending revoke and ack...");
+                       self.get_last_revoke_and_ack(logger)
                } else { None };
 
-               log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
+               if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+                       && self.context.signer_pending_commitment_update && revoke_and_ack.is_some() {
+                       log_trace!(logger, "Signer unblocked for revoke and ack, but unable to send due to resend order, waiting on signer for commitment update");
+                       self.context.signer_pending_revoke_and_ack = true;
+                       revoke_and_ack = None;
+               }
+               if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+                       && self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+                       log_trace!(logger, "Signer unblocked for commitment update, but unable to send due to resend order, waiting on signer for revoke and ack");
+                       self.context.signer_pending_commitment_update = true;
+                       commitment_update = None;
+               }
+
+               log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, {} funding_signed and {} channel_ready, with resend order {:?}",
                        if commitment_update.is_some() { "a" } else { "no" },
+                       if revoke_and_ack.is_some() { "a" } else { "no" },
                        if funding_signed.is_some() { "a" } else { "no" },
-                       if channel_ready.is_some() { "a" } else { "no" });
+                       if channel_ready.is_some() { "a" } else { "no" },
+                       self.context.resend_order);
 
                SignerResumeUpdates {
                        commitment_update,
+                       revoke_and_ack,
                        funding_signed,
                        channel_ready,
+                       order: self.context.resend_order.clone(),
                }
        }
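
On the caller's side, whatever subset of messages the signer did produce has to be sent in the indicated order; a hedged sketch of such a consumer (illustrative types, not the actual ChannelManager handler):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Order { CommitmentFirst, RevokeAndACKFirst }

    // Queue the resumable messages strictly in resend order; a None simply
    // means that message is still waiting on the signer.
    fn queue_updates(raa: Option<String>, cs: Option<String>, order: Order) -> Vec<String> {
        let (first, second) = match order {
            Order::RevokeAndACKFirst => (raa, cs),
            Order::CommitmentFirst => (cs, raa),
        };
        let mut out = Vec::new();
        out.extend(first); // Option<T> yields zero or one item
        out.extend(second);
        out
    }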
 
-       fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
-               let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-               let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
-               msgs::RevokeAndACK {
-                       channel_id: self.context.channel_id,
-                       per_commitment_secret,
-                       next_per_commitment_point,
-                       #[cfg(taproot)]
-                       next_local_nonce: None,
+       fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
+               debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2);
+               self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
+               let per_commitment_secret = self.context.holder_signer.as_ref()
+                       .release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2).ok();
+               if let (HolderCommitmentPoint::Available { current, .. }, Some(per_commitment_secret)) =
+                       (self.context.holder_commitment_point, per_commitment_secret) {
+                       self.context.signer_pending_revoke_and_ack = false;
+                       return Some(msgs::RevokeAndACK {
+                               channel_id: self.context.channel_id,
+                               per_commitment_secret,
+                               next_per_commitment_point: current,
+                               #[cfg(taproot)]
+                               next_local_nonce: None,
+                       })
+               }
+               if !self.context.holder_commitment_point.is_available() {
+                       log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+                               &self.context.channel_id(), self.context.holder_commitment_point.transaction_number());
+               }
+               if per_commitment_secret.is_none() {
+                       log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment secret for {} is not available",
+                               &self.context.channel_id(), self.context.holder_commitment_point.transaction_number(),
+                               self.context.holder_commitment_point.transaction_number() + 2);
+               }
+               #[cfg(not(async_signing))] {
+                       panic!("Holder commitment point and per commitment secret must be available when generating revoke_and_ack");
+               }
+               #[cfg(async_signing)] {
+                       // Technically, if we're at HolderCommitmentPoint::PendingNext,
+                       // we have a commitment point ready to send in an RAA; however, we
+                       // choose to wait, since if we sent the RAA now we could receive
+                       // another CS before the next commitment point becomes available.
+                       // Blocking our RAA here is a convenient way to make sure that,
+                       // post-funding, we're only ever waiting on one commitment point
+                       // at a time.
+                       log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+                               &self.context.channel_id(), self.context.holder_commitment_point.transaction_number());
+                       self.context.signer_pending_revoke_and_ack = true;
+                       None
                }
        }
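
Condensing the gate above: an RAA is produced only when both the next per-commitment point and the revoked secret are in hand; under async signing, anything less records the pending flag instead. A toy version with illustrative types:

    fn build_raa(
        next_point: Option<u64>, revoked_secret: Option<[u8; 32]>,
        signer_pending_revoke_and_ack: &mut bool,
    ) -> Option<(u64, [u8; 32])> {
        match (next_point, revoked_secret) {
            (Some(point), Some(secret)) => {
                *signer_pending_revoke_and_ack = false;
                Some((point, secret))
            },
            // Either the signer hasn't provided the next point or hasn't
            // released the secret yet; defer and retry when unblocked.
            _ => {
                *signer_pending_revoke_and_ack = true;
                None
            },
        }
    }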
 
@@ -5287,21 +5654,23 @@ impl<SP: Deref> Channel<SP> where
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
                        // just close here instead of trying to recover.
-                       return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+                       return Err(ChannelError::close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
                }
 
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
+                       return Err(ChannelError::close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
                }
 
-               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+               let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1;
                if msg.next_remote_commitment_number > 0 {
-                       let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
+                       let expected_point = self.context.holder_signer.as_ref()
+                               .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx)
+                               .expect("TODO: async signing is not yet supported for per commitment points upon channel reestablishment");
                        let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
-                               .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+                               .map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
                        if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
-                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+                               return Err(ChannelError::close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                        }
                        if msg.next_remote_commitment_number > our_commitment_transaction {
                                macro_rules! log_and_panic {
@@ -5345,7 +5714,7 @@ impl<SP: Deref> Channel<SP> where
                        if !self.context.channel_state.is_our_channel_ready() ||
                                        self.context.channel_state.is_monitor_update_in_progress() {
                                if msg.next_remote_commitment_number != 0 {
-                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
+                                       return Err(ChannelError::close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
                                }
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok(ReestablishResponses {
@@ -5357,13 +5726,8 @@ impl<SP: Deref> Channel<SP> where
                        }
 
                        // We have OurChannelReady set!
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
                        return Ok(ReestablishResponses {
-                               channel_ready: Some(msgs::ChannelReady {
-                                       channel_id: self.context.channel_id(),
-                                       next_per_commitment_point,
-                                       short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                               }),
+                               channel_ready: Some(self.get_channel_ready()),
                                raa: None, commitment_update: None,
                                order: RAACommitmentOrder::CommitmentFirst,
                                shutdown_msg, announcement_sigs,
@@ -5379,11 +5743,11 @@ impl<SP: Deref> Channel<SP> where
                                self.context.monitor_pending_revoke_and_ack = true;
                                None
                        } else {
-                               Some(self.get_last_revoke_and_ack())
+                               self.get_last_revoke_and_ack(logger)
                        }
                } else {
                        debug_assert!(false, "All values should have been handled in the four cases above");
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
                                msg.next_remote_commitment_number,
                                our_commitment_transaction
@@ -5400,18 +5764,13 @@ impl<SP: Deref> Channel<SP> where
                }
                let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
 
-               let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+               let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() == 1 {
                        // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
-                       let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
-                       Some(msgs::ChannelReady {
-                               channel_id: self.context.channel_id(),
-                               next_per_commitment_point,
-                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                       })
+                       Some(self.get_channel_ready())
                } else { None };
 
                if msg.next_local_commitment_number == next_counterparty_commitment_number {
-                       if required_revoke.is_some() {
+                       if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack {
                                log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
                        } else {
                                log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
@@ -5424,7 +5783,7 @@ impl<SP: Deref> Channel<SP> where
                                order: self.context.resend_order.clone(),
                        })
                } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
-                       if required_revoke.is_some() {
+                       if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack {
                                log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
                        } else {
                                log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
@@ -5438,21 +5797,36 @@ impl<SP: Deref> Channel<SP> where
                                        order: self.context.resend_order.clone(),
                                })
                        } else {
+                               let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+                                       && self.context.signer_pending_revoke_and_ack {
+                                       log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id());
+                                       self.context.signer_pending_commitment_update = true;
+                                       None
+                               } else {
+                                       self.get_last_commitment_update_for_send(logger).ok()
+                               };
+                               let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+                                       && self.context.signer_pending_commitment_update && required_revoke.is_some() {
+                                       log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id());
+                                       self.context.signer_pending_revoke_and_ack = true;
+                                       None
+                               } else {
+                                       required_revoke
+                               };
                                Ok(ReestablishResponses {
                                        channel_ready, shutdown_msg, announcement_sigs,
-                                       raa: required_revoke,
-                                       commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
+                                       raa, commitment_update,
                                        order: self.context.resend_order.clone(),
                                })
                        }
                } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
-                       Err(ChannelError::Close(format!(
+                       Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
                                msg.next_local_commitment_number,
                                next_counterparty_commitment_number,
                        )))
                } else {
-                       Err(ChannelError::Close(format!(
+                       Err(ChannelError::close(format!(
                                "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
                                msg.next_local_commitment_number,
                                next_counterparty_commitment_number,
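
The reestablish path applies the same withholding rule as the monitor-restore and signer-unblocked paths. Reusing the apply_resend_gate sketch from earlier (a hypothetical helper, not part of this patch), the lost-RAA-and-lost-CS case under RevokeAndACKFirst with a signer-blocked RAA plays out like this:

    fn demo_reestablish_gate() {
        // The RAA must go first but is still blocked on the signer, so the
        // rebuilt commitment update is withheld and re-marked pending.
        let (raa, cs, _raa_pending, cs_pending) = apply_resend_gate(
            Order::RevokeAndACKFirst, false, true, None::<&str>, Some("commitment_update"));
        assert!(raa.is_none() && cs.is_none() && cs_pending);
    }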
@@ -5527,7 +5901,7 @@ impl<SP: Deref> Channel<SP> where
        pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
                if self.closing_negotiation_ready() {
                        if self.context.closing_signed_in_flight {
-                               return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+                               return Err(ChannelError::close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
                        } else {
                                self.context.closing_signed_in_flight = true;
                        }
@@ -5572,7 +5946,7 @@ impl<SP: Deref> Channel<SP> where
                        ChannelSignerType::Ecdsa(ecdsa) => {
                                let sig = ecdsa
                                        .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
-                                       .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+                                       .map_err(|()| ChannelError::close("Failed to get signature for closing transaction.".to_owned()))?;
 
                                self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
                                Ok((Some(msgs::ClosingSigned {
@@ -5618,17 +5992,17 @@ impl<SP: Deref> Channel<SP> where
        ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
        {
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
                if self.context.channel_state.is_pre_funded_state() {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
-                       return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+                       return Err(ChannelError::close("Peer sent shutdown pre-funding generation".to_owned()));
                }
                for htlc in self.context.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
-                               return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+                               return Err(ChannelError::close("Got shutdown with remote pending HTLCs".to_owned()));
                        }
                }
                assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
@@ -5656,10 +6030,10 @@ impl<SP: Deref> Channel<SP> where
                                assert!(send_shutdown);
                                let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
                                        Ok(scriptpubkey) => scriptpubkey,
-                                       Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+                                       Err(_) => return Err(ChannelError::close("Failed to get shutdown scriptpubkey".to_owned())),
                                };
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
-                                       return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+                                       return Err(ChannelError::close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
                                }
                                self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
@@ -5741,20 +6115,20 @@ impl<SP: Deref> Channel<SP> where
                where F::Target: FeeEstimator
        {
                if !self.context.channel_state.is_both_sides_shutdown() {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+                       return Err(ChannelError::close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
                }
                if self.context.channel_state.is_peer_disconnected() {
-                       return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+                       return Err(ChannelError::close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
                }
                if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+                       return Err(ChannelError::close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
                }
                if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
-                       return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+                       return Err(ChannelError::close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
                }
 
                if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
-                       return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+                       return Err(ChannelError::close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
                }
 
                if self.context.channel_state.is_monitor_update_in_progress() {
@@ -5765,7 +6139,7 @@ impl<SP: Deref> Channel<SP> where
                let funding_redeemscript = self.context.get_funding_redeemscript();
                let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
                if used_total_fee != msg.fee_satoshis {
-                       return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+                       return Err(ChannelError::close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
                }
                let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
 
@@ -5782,7 +6156,7 @@ impl<SP: Deref> Channel<SP> where
 
                for outp in closing_tx.trust().built_transaction().output.iter() {
                        if !outp.script_pubkey.is_witness_program() && outp.value < Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS) {
-                               return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+                               return Err(ChannelError::close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
                        }
                }
 
@@ -5828,7 +6202,7 @@ impl<SP: Deref> Channel<SP> where
                                        ChannelSignerType::Ecdsa(ecdsa) => {
                                                let sig = ecdsa
                                                        .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
-                                                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+                                                       .map_err(|_| ChannelError::close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
                                                                closure_reason,
@@ -5870,7 +6244,7 @@ impl<SP: Deref> Channel<SP> where
 
                if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
                        if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
-                               return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+                               return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
                        }
                        if max_fee_satoshis < our_min_fee {
                                return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
@@ -5886,7 +6260,7 @@ impl<SP: Deref> Channel<SP> where
                                propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
                        } else {
                                if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
-                                       return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+                                       return Err(ChannelError::close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
                                                msg.fee_satoshis, our_min_fee, our_max_fee)));
                                }
                                // The proposed fee is in our acceptable range, accept it and broadcast!
@@ -5902,7 +6276,7 @@ impl<SP: Deref> Channel<SP> where
                                        } else if last_fee < our_max_fee {
                                                propose_fee!(our_max_fee);
                                        } else {
-                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+                                               return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
                                        }
                                } else {
                                        if msg.fee_satoshis > our_min_fee {
@@ -5910,7 +6284,7 @@ impl<SP: Deref> Channel<SP> where
                                        } else if last_fee > our_min_fee {
                                                propose_fee!(our_min_fee);
                                        } else {
-                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+                                               return Err(ChannelError::close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
                                        }
                                }
                        } else {
@@ -6053,7 +6427,7 @@ impl<SP: Deref> Channel<SP> where
        }
 
        pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
-               self.context.cur_holder_commitment_transaction_number + 1
+               self.context.holder_commitment_point.transaction_number() + 1
        }
 
        pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
@@ -6168,7 +6542,7 @@ impl<SP: Deref> Channel<SP> where
                        debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
                        return true;
                }
-               if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+               if self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER - 1 &&
                        self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
                        // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
                        // waiting for the initial monitor persistence. Thus, we check if our commitment
@@ -6227,7 +6601,9 @@ impl<SP: Deref> Channel<SP> where
                self.context.channel_update_status = status;
        }
 
-       fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+       fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
+               where L::Target: Logger
+       {
                // Called:
                //  * always when a new block/transactions are confirmed with the new height
                //  * when funding is signed with a height of 0
@@ -6247,6 +6623,8 @@ impl<SP: Deref> Channel<SP> where
                // If we're still pending the signature on a funding transaction, then we're not ready to send a
                // channel_ready yet.
                if self.context.signer_pending_funding {
+                       // TODO: set signer_pending_channel_ready
+                       log_debug!(logger, "Can't produce channel_ready: the signer is pending funding.");
                        return None;
                }
 
@@ -6279,22 +6657,35 @@ impl<SP: Deref> Channel<SP> where
                        false
                };
 
-               if need_commitment_update {
-                       if !self.context.channel_state.is_monitor_update_in_progress() {
-                               if !self.context.channel_state.is_peer_disconnected() {
-                                       let next_per_commitment_point =
-                                               self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
-                                       return Some(msgs::ChannelReady {
-                                               channel_id: self.context.channel_id,
-                                               next_per_commitment_point,
-                                               short_channel_id_alias: Some(self.context.outbound_scid_alias),
-                                       });
-                               }
-                       } else {
-                               self.context.monitor_pending_channel_ready = true;
-                       }
+               if !need_commitment_update {
+                       log_debug!(logger, "Not producing channel_ready: we do not need a commitment update");
+                       return None;
+               }
+
+               if self.context.channel_state.is_monitor_update_in_progress() {
+                       log_debug!(logger, "Not producing channel_ready: a monitor update is in progress. Setting monitor_pending_channel_ready.");
+                       self.context.monitor_pending_channel_ready = true;
+                       return None;
+               }
+
+               if self.context.channel_state.is_peer_disconnected() {
+                       log_debug!(logger, "Not producing channel_ready: the peer is disconnected.");
+                       return None;
+               }
+
+                       // TODO: when get_per_commitment_point becomes async, check whether the point is
+                       // available; if not, set signer_pending_channel_ready and return None
+
+               Some(self.get_channel_ready())
+       }
+
+       fn get_channel_ready(&self) -> msgs::ChannelReady {
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               msgs::ChannelReady {
+                       channel_id: self.context.channel_id(),
+                       next_per_commitment_point: self.context.holder_commitment_point.current_point(),
+                       short_channel_id_alias: Some(self.context.outbound_scid_alias),
                }
-               None
        }
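
With the early returns above, get_channel_ready is only reached once every blocker has been ruled out, so its debug_assert on point availability should hold. A hypothetical runtime-checked wrapper (not part of this patch) might look like:

    fn channel_ready_if_point_available(&self) -> Option<msgs::ChannelReady> {
        // Mirrors the debug_assert in get_channel_ready as a runtime guard.
        if !self.context.holder_commitment_point.is_available() { return None; }
        Some(self.get_channel_ready())
    }
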
 
        /// When a transaction is confirmed, we check whether it is or spends the funding transaction
@@ -6361,7 +6752,7 @@ impl<SP: Deref> Channel<SP> where
                                        // If we allow 1-conf funding, we may need to check for channel_ready here and
                                        // send it immediately instead of waiting for a best_block_updated call (which
                                        // may have already happened for this block).
-                                       if let Some(channel_ready) = self.check_get_channel_ready(height) {
+                                       if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
                                                log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
                                                let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
                                                msgs = (Some(channel_ready), announcement_sigs);
@@ -6427,7 +6818,7 @@ impl<SP: Deref> Channel<SP> where
 
                self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
 
-               if let Some(channel_ready) = self.check_get_channel_ready(height) {
+               if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
                        let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
                                self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
                        } else { None };
@@ -6660,12 +7051,12 @@ impl<SP: Deref> Channel<SP> where
                let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
 
                if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
                                 &announcement, self.context.get_counterparty_node_id())));
                }
                if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
-                       return Err(ChannelError::Close(format!(
+                       return Err(ChannelError::close(format!(
                                "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
                                &announcement, self.context.counterparty_funding_pubkey())));
                }
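
Both checks verify an ECDSA signature over the same digest, the double-SHA256 of the unsigned channel announcement, once against the peer's node key and once against its funding key. Condensed, the pattern is:

    let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
    let node_sig_ok = secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &counterparty_node_id).is_ok();
    let funding_sig_ok = secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, counterparty_funding_pubkey).is_ok();
    // Either failure rejects the announcement_signatures message with a close error.
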
@@ -6730,7 +7121,7 @@ impl<SP: Deref> Channel<SP> where
 
                        // next_local_commitment_number is the next commitment_signed number we expect to
                        // receive (indicating if they need to resend one that we missed).
-                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+                       next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number(),
                        // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
                        // receive, however we track it by the next commitment number for a remote transaction
                        // (which is one further, as they always revoke previous commitment transaction, not
@@ -7024,6 +7415,7 @@ impl<SP: Deref> Channel<SP> where
                                        channel_id: self.context.channel_id,
                                        signature,
                                        htlc_signatures,
+                                       batch: None,
                                        #[cfg(taproot)]
                                        partial_signature_with_nonce: None,
                                }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
@@ -7186,13 +7578,14 @@ pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider
 }
 
 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
-       pub fn new<ES: Deref, F: Deref>(
+       pub fn new<ES: Deref, F: Deref, L: Deref>(
                fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
                channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
-               outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
+               outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>, logger: L
        ) -> Result<OutboundV1Channel<SP>, APIError>
        where ES::Target: EntropySource,
-             F::Target: FeeEstimator
+             F::Target: FeeEstimator,
+             L::Target: Logger,
        {
                let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
@@ -7224,6 +7617,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                channel_keys_id,
                                holder_signer,
                                pubkeys,
+                               logger,
                        )?,
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
@@ -7282,7 +7676,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7337,7 +7731,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
        pub fn is_resumable(&self) -> bool {
                !self.context.have_received_message() &&
-                       self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER
+                       self.context.holder_commitment_point.transaction_number() == INITIAL_COMMITMENT_NUMBER
        }
 
        pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
@@ -7348,11 +7742,12 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        panic!("Cannot generate an open_channel after we've moved forward");
                }
 
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Tried to send an open_channel for a channel that has already advanced");
                }
 
-               let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               let first_per_commitment_point = self.context.holder_commitment_point.current_point();
                let keys = self.context.get_holder_pubkeys();
 
                msgs::OpenChannel {
@@ -7385,136 +7780,11 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        }
 
        // Message handlers
-       pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
-               let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
-               // Check sanity of message fields:
-               if !self.context.is_outbound() {
-                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
-               }
-               if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
-                       return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
-               }
-               if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
-                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
-               }
-               if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
-                               msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
-               }
-               let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
-               if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
-                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
-               }
-               let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
-               if msg.common_fields.to_self_delay > max_delay_acceptable {
-                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
-               }
-               if msg.common_fields.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
-               }
-               if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
-               }
-
-               // Now check against optional parameters as set by config...
-               if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
-               }
-               if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
-               }
-               if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
-               }
-               if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
-               }
-               if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
-                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
-               }
-               if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
-                       return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
-               }
-
-               if let Some(ty) = &msg.common_fields.channel_type {
-                       if *ty != self.context.channel_type {
-                               return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
-                       }
-               } else if their_features.supports_channel_type() {
-                       // Assume they've accepted the channel type as they said they understand it.
-               } else {
-                       let channel_type = ChannelTypeFeatures::from_init(&their_features);
-                       if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                               return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-                       }
-                       self.context.channel_type = channel_type.clone();
-                       self.context.channel_transaction_parameters.channel_type_features = channel_type;
-               }
-
-               let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
-                       match &msg.common_fields.shutdown_scriptpubkey {
-                               &Some(ref script) => {
-                                       // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
-                                       if script.len() == 0 {
-                                               None
-                                       } else {
-                                               if !script::is_bolt2_compliant(&script, their_features) {
-                                                       return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
-                                               }
-                                               Some(script.clone())
-                                       }
-                               },
-                               // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-                               &None => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
-                               }
-                       }
-               } else { None };
-
-               self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
-               self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
-               self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
-               self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
-               self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
-
-               if peer_limits.trust_own_funding_0conf {
-                       self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
-               } else {
-                       self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
-               }
-
-               let counterparty_pubkeys = ChannelPublicKeys {
-                       funding_pubkey: msg.common_fields.funding_pubkey,
-                       revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
-                       payment_point: msg.common_fields.payment_basepoint,
-                       delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
-                       htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
-               };
-
-               self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
-                       selected_contest_delay: msg.common_fields.to_self_delay,
-                       pubkeys: counterparty_pubkeys,
-               });
-
-               self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
-               self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
-               self.context.channel_state = ChannelState::NegotiatingFunding(
-                       NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
-               );
-               self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
-               Ok(())
+       pub fn accept_channel(
+               &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits,
+               their_features: &InitFeatures
+       ) -> Result<(), ChannelError> {
+               self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis)
        }
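
The V1 handler now forwards to a shared helper on the channel context, so a V2 acceptance path can run the identical sanity checks. A hypothetical V2-side call, assuming the helper signature matches the use above:

    // Hypothetical: a V2 handler would pass its own message's common fields plus
    // whatever channel reserve value applies to V2 channels.
    self.context.do_accept_channel_checks(
        default_limits, their_features, &msg.common_fields, channel_reserve_satoshis)
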
 
        /// Handles a funding_signed message from the remote end.
@@ -7526,14 +7796,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                L::Target: Logger
        {
                if !self.context.is_outbound() {
-                       return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned())));
                }
                if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
-                       return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned())));
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7547,15 +7817,15 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
                        &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
 
-               let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+               let holder_signer = self.context.build_holder_transaction_keys();
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &holder_signer, true, false, logger).tx;
                {
                        let trusted_tx = initial_commitment_tx.trust();
                        let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                        let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
                        // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
                        if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
-                               return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
+                               return Err((self, ChannelError::close("Invalid funding_signed signature from peer".to_owned())));
                        }
                }
 
@@ -7570,7 +7840,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                let validated =
                        self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
                if validated.is_err() {
-                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+                       return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
                }
 
                let funding_redeemscript = self.context.get_funding_redeemscript();
@@ -7601,7 +7871,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                } else {
                        self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                }
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+                       // We only fail to advance our commitment point/number if we're currently
+                       // waiting for our signer to unblock and provide a commitment point.
+                       // We cannot have sent open_channel before this has occurred, so if we
+                       // hit this error while handling funding_signed, something has gone wrong.
+                       debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_signed");
+                       return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned())));
+               }
                self.context.cur_counterparty_commitment_transaction_number -= 1;
 
                log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
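
A sketch of the transition `advance` is expected to make, assuming the Available/PendingNext variants and the fallible signer call used elsewhere in this patch: the cached next point becomes current, the transaction number decrements, and a fresh next point is requested from the signer.

    // Illustrative only, not the actual method body.
    fn advance_sketch(
        point: HolderCommitmentPoint, signer: &impl ChannelSigner,
        secp_ctx: &Secp256k1<secp256k1::All>,
    ) -> Result<HolderCommitmentPoint, ()> {
        match point {
            HolderCommitmentPoint::Available { transaction_number, next, .. } => {
                Ok(match signer.get_per_commitment_point(transaction_number - 2, secp_ctx) {
                    Ok(new_next) => HolderCommitmentPoint::Available {
                        transaction_number: transaction_number - 1, current: next, next: new_next,
                    },
                    Err(()) => HolderCommitmentPoint::PendingNext {
                        transaction_number: transaction_number - 1, current: next,
                    },
                })
            },
            // Still waiting on the signer for the next point; callers see this as an error.
            HolderCommitmentPoint::PendingNext { .. } => Err(()),
        }
    }
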
@@ -7612,7 +7889,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                        dual_funding_channel_context: None,
                };
 
-               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
                Ok((channel, channel_monitor))
        }
@@ -7642,28 +7919,28 @@ pub(super) fn channel_type_from_open_channel(
 ) -> Result<ChannelTypeFeatures, ChannelError> {
        if let Some(channel_type) = &common_fields.channel_type {
                if channel_type.supports_any_optional_bits() {
-                       return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+                       return Err(ChannelError::close("Channel Type field contained optional bits - this is not allowed".to_owned()));
                }
 
                // We only support the channel types defined by the `ChannelManager` in
                // `provided_channel_type_features`. The channel type must always support
                // `static_remote_key`.
                if !channel_type.requires_static_remote_key() {
-                       return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+                       return Err(ChannelError::close("Channel Type was not understood - we require static remote key".to_owned()));
                }
                // Make sure we support all of the features behind the channel type.
                if !channel_type.is_subset(our_supported_features) {
-                       return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+                       return Err(ChannelError::close("Channel Type contains unsupported features".to_owned()));
                }
                let announced_channel = if (common_fields.channel_flags & 1) == 1 { true } else { false };
                if channel_type.requires_scid_privacy() && announced_channel {
-                       return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+                       return Err(ChannelError::close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
                }
                Ok(channel_type.clone())
        } else {
                let channel_type = ChannelTypeFeatures::from_init(&their_features);
                if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-                       return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+                       return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
                }
                Ok(channel_type)
        }
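
When open_channel carries no explicit channel_type, the type implied by the peer's init features must collapse to exactly static_remote_key. As a sketch with a hypothetical peer feature set:

    let implied = ChannelTypeFeatures::from_init(&their_features);
    // Anything richer than static_remote_key is rejected for non-negotiated types.
    assert_eq!(implied, ChannelTypeFeatures::only_static_remote_key());
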
@@ -7737,7 +8014,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                ) {
                        panic!("Tried to send accept_channel after channel had moved forward");
                }
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Tried to send an accept_channel for a channel that has already advanced");
                }
 
@@ -7750,7 +8027,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        ///
        /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
        fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
-               let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+               debug_assert!(self.context.holder_commitment_point.is_available());
+               let first_per_commitment_point = self.context.holder_commitment_point.current_point();
                let keys = self.context.get_holder_pubkeys();
 
                msgs::AcceptChannel {
@@ -7792,8 +8070,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
                let funding_script = self.context.get_funding_redeemscript();
 
-               let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+               let keys = self.context.build_holder_transaction_keys();
+               let initial_commitment_tx = self.context.build_commitment_transaction(self.context.holder_commitment_point.transaction_number(), &keys, true, false, logger).tx;
                let trusted_tx = initial_commitment_tx.trust();
                let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
                let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
@@ -7814,7 +8092,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                L::Target: Logger
        {
                if self.context.is_outbound() {
-                       return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned())));
                }
                if !matches!(
                        self.context.channel_state, ChannelState::NegotiatingFunding(flags)
@@ -7823,11 +8101,11 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
                        // remember the channel, so it's safe to just send an error_message here and drop the
                        // channel.
-                       return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
+                       return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned())));
                }
                if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
-                               self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                               self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
                }
 
@@ -7859,7 +8137,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                );
 
                if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
-                       return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
+                       return Err((self, ChannelError::close("Failed to validate our commitment".to_owned())));
                }
 
                // Now that we're past error-generating stuff, update our local state:
@@ -7867,7 +8145,14 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
                self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
                self.context.cur_counterparty_commitment_transaction_number -= 1;
-               self.context.cur_holder_commitment_transaction_number -= 1;
+               if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+                       // We only fail to advance our commitment point/number if we're currently
+                       // waiting for our signer to unblock and provide a commitment point.
+                       // We cannot have sent accept_channel before this has occurred, so if we
+                       // hit this error while handling funding_created, something has gone wrong.
+                       debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_created");
+                       return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned())));
+               }
 
                let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
 
@@ -7901,7 +8186,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                        #[cfg(any(dual_funding, splicing))]
                        dual_funding_channel_context: None,
                };
-               let need_channel_ready = channel.check_get_channel_ready(0).is_some();
+               let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 
                Ok((channel, funding_signed, channel_monitor))
@@ -7919,14 +8204,15 @@ pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider
 
 #[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
-       pub fn new<ES: Deref, F: Deref>(
+       pub fn new<ES: Deref, F: Deref, L: Deref>(
                fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
                counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
                user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
-               funding_confirmation_target: ConfirmationTarget,
+               funding_confirmation_target: ConfirmationTarget, logger: L,
        ) -> Result<OutboundV2Channel<SP>, APIError>
        where ES::Target: EntropySource,
              F::Target: FeeEstimator,
+             L::Target: Logger,
        {
                let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
                let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
@@ -7958,6 +8244,7 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
                                channel_keys_id,
                                holder_signer,
                                pubkeys,
+                               logger,
                        )?,
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
                        dual_funding_context: DualFundingChannelContext {
@@ -7988,16 +8275,18 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
                        debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
                }
 
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
                }
 
                let first_per_commitment_point = self.context.holder_signer.as_ref()
-                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
-                               &self.context.secp_ctx);
+                       .get_per_commitment_point(self.context.holder_commitment_point.transaction_number(),
+                               &self.context.secp_ctx)
+                               .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
                let second_per_commitment_point = self.context.holder_signer.as_ref()
-                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
-                               &self.context.secp_ctx);
+                       .get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1,
+                               &self.context.secp_ctx)
+                               .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
                let keys = self.context.get_holder_pubkeys();
 
                msgs::OpenChannelV2 {
@@ -8063,7 +8352,7 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
                // First check the channel type is known, failing before we do anything else if we don't
                // support this channel type.
                if msg.common_fields.channel_type.is_none() {
-                       return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
+                       return Err(ChannelError::close(format!("Rejecting V2 channel {} missing channel_type",
                                msg.common_fields.temporary_channel_id)))
                }
                let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
@@ -8130,7 +8419,7 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
                ) {
                        debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
                }
-               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+               if self.context.holder_commitment_point.transaction_number() != INITIAL_COMMITMENT_NUMBER {
                        debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
                }
 
@@ -8144,9 +8433,11 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
        /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
        fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
                let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-                       self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+                       self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx)
+                       .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
                let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-                       self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
+                       self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx)
+                       .expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
                let keys = self.context.get_holder_pubkeys();
 
                msgs::AcceptChannelV2 {
@@ -8214,7 +8505,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures)
 const SERIALIZATION_VERSION: u8 = 4;
 const MIN_SERIALIZATION_VERSION: u8 = 3;
 
-impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
+impl_writeable_tlv_based_enum_legacy!(InboundHTLCRemovalReason,;
        (0, FailRelay),
        (1, FailMalformed),
        (2, Fulfill),
@@ -8319,7 +8610,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                }
                self.context.destination_script.write(writer)?;
 
-               self.context.cur_holder_commitment_transaction_number.write(writer)?;
+               self.context.holder_commitment_point.transaction_number().write(writer)?;
                self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
                self.context.value_to_self_msat.write(writer)?;
 
@@ -8593,6 +8884,10 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
                }
 
+               // `current_point` will become optional when async signing is implemented.
+               let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point());
+               let next_holder_commitment_point = self.context.holder_commitment_point.next_point();
+
                write_tlv_fields!(writer, {
                        (0, self.context.announcement_sigs, option),
                        // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -8629,7 +8924,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       // 45 and 47 are reserved for async signing
+                       (45, cur_holder_commitment_point, option),
+                       (47, next_holder_commitment_point, option),
                        (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
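
Types 45 and 47 are odd TLV numbers, so under the even/odd rule used by this serialization format an older reader simply skips them while a newer reader treats them as optional; this lets the commitment points ride along without breaking downgrades. A sketch of the rule:

    // Reading a TLV stream with an unknown type `t`:
    //   t % 2 == 0 -> hard error (a required field we don't understand)
    //   t % 2 == 1 -> skipped   (optional; safe for pre-upgrade readers)
    // 45/47 being odd means the new points default to None when absent, and the
    // deserialization below re-derives the current point from the signer.
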
 
@@ -8940,6 +9236,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
                let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
 
+               let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
+               let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
+
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (1, minimum_depth, option),
@@ -8970,7 +9269,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       // 45 and 47 are reserved for async signing
+                       (45, cur_holder_commitment_point_opt, option),
+                       (47, next_holder_commitment_point_opt, option),
                        (49, local_initiated_shutdown, option),
                });
 
@@ -9082,6 +9382,26 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        }
                }
 
+               // If we're restoring this channel for the first time after an upgrade, then we require that the
+               // signer be available so that we can immediately populate the current commitment point. Channel
+               // restoration will fail if this is not possible.
+               let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) {
+                       (Some(current), Some(next)) => HolderCommitmentPoint::Available {
+                               transaction_number: cur_holder_commitment_transaction_number, current, next
+                       },
+                       (Some(current), _) => HolderCommitmentPoint::PendingNext {
+                               transaction_number: cur_holder_commitment_transaction_number, current,
+                       },
+                       (_, _) => {
+                               // TODO(async_signing): remove this expect with the Uninitialized variant
+                               let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx)
+                                       .expect("Must be able to derive the current commitment point upon channel restoration");
+                               HolderCommitmentPoint::PendingNext {
+                                       transaction_number: cur_holder_commitment_transaction_number, current,
+                               }
+                       },
+               };
+
                Ok(Channel {
                        context: ChannelContext {
                                user_id,
@@ -9107,7 +9427,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                shutdown_scriptpubkey,
                                destination_script,
 
-                               cur_holder_commitment_transaction_number,
+                               holder_commitment_point,
                                cur_counterparty_commitment_transaction_number,
                                value_to_self_msat,
 
@@ -9126,6 +9446,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
                                monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
 
+                               signer_pending_revoke_and_ack: false,
                                signer_pending_commitment_update: false,
                                signer_pending_funding: false,
 
@@ -9337,11 +9658,12 @@ mod tests {
                keys_provider.expect(OnGetShutdownScriptpubkey {
                        returns: non_v0_segwit_shutdown_script.clone(),
                });
+               let logger = test_utils::TestLogger::new();
 
                let secp_ctx = Secp256k1::new();
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
+               match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger) {
                        Err(APIError::IncompatibleShutdownScript { script }) => {
                                assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
                        },
@@ -9361,10 +9683,11 @@ mod tests {
                let seed = [42; 32];
                let network = Network::Testnet;
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+               let logger = test_utils::TestLogger::new();
 
                let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                // Now change the fee so we can check that the fee in the open_channel message is the
                // same as the old fee.
@@ -9391,7 +9714,7 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
@@ -9471,10 +9794,11 @@ mod tests {
                let seed = [42; 32];
                let network = Network::Testnet;
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+               let logger = test_utils::TestLogger::new();
 
                let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
                let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
@@ -9523,7 +9847,7 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
@@ -9587,12 +9911,12 @@ mod tests {
                // Test that `OutboundV1Channel::new` creates a channel with the correct value for
                // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
                // which is set to the lower bound + 1 (2%) of the `channel_value`.
-               let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
+               let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger).unwrap();
                let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
                assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
 
                // Test with the upper bound - 1 of valid values (99%).
-               let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
+               let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger).unwrap();
                let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
                assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
 
@@ -9612,14 +9936,14 @@ mod tests {
 
                // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
                // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-               let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
+               let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger).unwrap();
                let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
                assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
 
                // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
                // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value
                // larger than 100.
-               let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
+               let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger).unwrap();
                let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
                assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
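
Taken together with the 2% and 99% cases above, these assertions pin down a clamping rule: the configured percentage is clamped to [1, 100] before being applied to the channel value. A hypothetical helper capturing just that rule (not the crate's internal code):

fn holder_max_in_flight_msat_sketch(channel_value_satoshis: u64, configured_percent: u8) -> u64 {
    let percent = configured_percent.clamp(1, 100) as u64;
    channel_value_satoshis * 1000 * percent / 100
}

fn main() {
    assert_eq!(holder_max_in_flight_msat_sketch(10_000_000, 0), 100_000_000);      // floored to 1%
    assert_eq!(holder_max_in_flight_msat_sketch(10_000_000, 101), 10_000_000_000); // capped at 100%
}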
 
@@ -9672,7 +9996,7 @@ mod tests {
 
                let mut outbound_node_config = UserConfig::default();
                outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
-               let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
+               let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger).unwrap();
 
                let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
                assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
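
The expected value restates the floor applied to the holder-selected reserve: the proportional amount never drops below MIN_THEIR_CHAN_RESERVE_SATOSHIS (1000 sats, per the constant defined earlier in this file). A minimal sketch of that rule, taking the configured proportion as an f64 fraction to match the test:

fn selected_reserve_sketch(channel_value_satoshis: u64, reserve_fraction: f64) -> u64 {
    core::cmp::max(1000, (channel_value_satoshis as f64 * reserve_fraction) as u64)
}

// e.g. a 0.005 fraction of a 100_000 sat channel gives 500 sats proportionally,
// so the floor wins: selected_reserve_sketch(100_000, 0.005) == 1000.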
@@ -9709,7 +10033,7 @@ mod tests {
                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
-               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+               let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
@@ -9742,7 +10066,8 @@ mod tests {
                                chain_hash,
                                short_channel_id: 0,
                                timestamp: 0,
-                               flags: 0,
+                               message_flags: 1, // Only must_be_one
+                               channel_flags: 0,
                                cltv_expiry_delta: 100,
                                htlc_minimum_msat: 5,
                                htlc_maximum_msat: MAX_VALUE_MSAT,
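
Note the flags change in this hunk: the old single `flags` byte of `UnsignedChannelUpdate` is split into the two bytes the BOLT 7 wire format actually carries. In `message_flags`, bit 0 must be one (it formerly signalled the presence of `htlc_maximum_msat`) and bit 1 is `dont_forward`; in `channel_flags`, bit 0 encodes the direction and bit 1 marks the channel disabled. An illustrative assembly of the two bytes (not a crate API):

fn channel_update_flags(dont_forward: bool, from_node_2: bool, disabled: bool) -> (u8, u8) {
    let message_flags = 1u8 | ((dont_forward as u8) << 1); // bit 0: must_be_one
    let channel_flags = (from_node_2 as u8) | ((disabled as u8) << 1);
    (message_flags, channel_flags)
}

// channel_update_flags(false, false, false) == (1, 0), matching the literal above:
// a forwardable update from node_1 on an enabled channel.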
@@ -9785,7 +10110,7 @@ mod tests {
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
                let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
-                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger
                ).unwrap();
                let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
                        &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
@@ -9939,7 +10264,7 @@ mod tests {
                let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let mut config = UserConfig::default();
                config.channel_handshake_config.announced_channel = false;
-               let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
+               let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None, &*logger).unwrap(); // Nothing uses their network key in this test
                chan.context.holder_dust_limit_satoshis = 546;
                chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in when we receive accept_channel
 
@@ -10686,7 +11011,7 @@ mod tests {
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
-                       node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+                       node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
                let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
                channel_type_features.set_zero_conf_required();
@@ -10721,7 +11046,7 @@ mod tests {
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
-                       &config, 0, 42, None
+                       &config, 0, 42, None, &logger
                ).unwrap();
                assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
 
@@ -10732,7 +11057,7 @@ mod tests {
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-                       None
+                       None, &logger
                ).unwrap();
 
                let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10770,7 +11095,7 @@ mod tests {
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-                       None
+                       None, &logger
                ).unwrap();
 
                // Set `channel_type` to `None` to force the implicit feature negotiation.
@@ -10817,7 +11142,7 @@ mod tests {
                let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
                        &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-                       None
+                       None, &logger
                ).unwrap();
 
                let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10836,7 +11161,7 @@ mod tests {
                // LDK.
                let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
-                       10000000, 100000, 42, &config, 0, 42, None
+                       10000000, 100000, 42, &config, 0, 42, None, &logger
                ).unwrap();
 
                let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10886,7 +11211,8 @@ mod tests {
                        &config,
                        0,
                        42,
-                       None
+                       None,
+                       &logger
                ).unwrap();
 
                let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10981,6 +11307,6 @@ mod tests {
                // ChannelState::WaitingForBatch is only cleared when the ChannelManager calls set_batch_ready().
                node_a_chan.set_batch_ready();
                assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
-               assert!(node_a_chan.check_get_channel_ready(0).is_some());
+               assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());
        }
 }
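
A closing note on the batch-funding test above: `check_get_channel_ready` (which now also takes a logger) returns None while the channel is still gated on its batch, and only yields a `channel_ready` once `set_batch_ready` has cleared that gate. A toy model of the behaviour the assertions encode (illustrative types only, not the crate's `ChannelState`):

#[derive(PartialEq)]
enum Gate { WaitingForBatch, Ready }

struct Chan { gate: Gate, funding_confirmed: bool }

impl Chan {
    // Stands in for set_batch_ready(): only the ChannelManager clears the batch flag.
    fn set_batch_ready(&mut self) { self.gate = Gate::Ready; }

    // Stands in for check_get_channel_ready(): nothing is announced while batched.
    fn check_get_channel_ready(&self) -> Option<&'static str> {
        if self.funding_confirmed && self.gate == Gate::Ready { Some("channel_ready") } else { None }
    }
}

fn main() {
    let mut chan = Chan { gate: Gate::WaitingForBatch, funding_confirmed: true };
    assert!(chan.check_get_channel_ready().is_none()); // held back by the batch
    chan.set_batch_ready();
    assert!(chan.check_get_channel_ready().is_some());
}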