Fix `cmp::max` usage in `ChannelContext::get_dust_buffer_feerate`
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 4b18e25e64191d8c423611aa86ea9fbdbad0912b..58d35a1a95a4ddfc05b29d7fcfe4db5bdf6e89a7 100644
@@ -50,7 +50,6 @@ use crate::util::scid_utils::scid_from_parts;
 use crate::io;
 use crate::prelude::*;
 use core::{cmp,mem,fmt};
-use core::convert::TryInto;
 use core::ops::Deref;
 #[cfg(any(test, fuzzing, debug_assertions))]
 use crate::sync::Mutex;
@@ -104,10 +103,38 @@ enum InboundHTLCRemovalReason {
        Fulfill(PaymentPreimage),
 }
 
+/// Represents the resolution status of an inbound HTLC.
+#[derive(Clone)]
+enum InboundHTLCResolution {
+       /// Resolved implies the action we must take with the inbound HTLC has already been determined,
+       /// i.e., we already know whether it must be failed back or forwarded.
+       //
+       // TODO: Once this variant is removed, we should also clean up
+       // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
+       Resolved {
+               pending_htlc_status: PendingHTLCStatus,
+       },
+       /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
+       /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
+       /// nodes for the state update in which it was proposed.
+       Pending {
+               update_add_htlc: msgs::UpdateAddHTLC,
+       },
+}
+
+impl_writeable_tlv_based_enum!(InboundHTLCResolution,
+       (0, Resolved) => {
+               (0, pending_htlc_status, required),
+       },
+       (2, Pending) => {
+               (0, update_add_htlc, required),
+       };
+);
+
 enum InboundHTLCState {
        /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
        /// update_add_htlc message for this HTLC.
-       RemoteAnnounced(PendingHTLCStatus),
+       RemoteAnnounced(InboundHTLCResolution),
        /// Included in a received commitment_signed message (implying we've
        /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
        /// state (see the example below). We have not yet included this HTLC in a
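// Illustrative sketch (not part of the patch): how the two resolution variants
// introduced above are meant to be consumed once an HTLC becomes irrevocably
// committed. The types here are simplified stand-ins; the real `PendingHTLCStatus`
// and `msgs::UpdateAddHTLC` live elsewhere in the crate.
enum ExamplePendingHTLCStatus { Forward(&'static str), Fail(&'static str) }
struct ExampleUpdateAddHTLC { htlc_id: u64 }
enum ExampleResolution {
    Resolved { pending_htlc_status: ExamplePendingHTLCStatus },
    Pending { update_add_htlc: ExampleUpdateAddHTLC },
}

fn on_irrevocably_committed(res: ExampleResolution, deferred: &mut Vec<ExampleUpdateAddHTLC>) {
    match res {
        // The decision was made when `update_add_htlc` was first received: act on it now.
        ExampleResolution::Resolved { pending_htlc_status } => match pending_htlc_status {
            ExamplePendingHTLCStatus::Forward(info) => { let _ = info; /* queue the forward */ }
            ExamplePendingHTLCStatus::Fail(reason) => { let _ = reason; /* queue a fail-back */ }
        },
        // The decision was deferred: stash the raw message (the patch appends these to
        // `monitor_pending_update_adds`) and resolve it after both `revoke_and_ack`s.
        ExampleResolution::Pending { update_add_htlc } => deferred.push(update_add_htlc),
    }
}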
@@ -137,13 +164,13 @@ enum InboundHTLCState {
        /// Implies AwaitingRemoteRevoke.
        ///
        /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
-       AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
+       AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
        /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
        /// We have also included this HTLC in our latest commitment_signed and are now just waiting
        /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
        /// channel (before it can then get forwarded and/or removed).
        /// Implies AwaitingRemoteRevoke.
-       AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
+       AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
        Committed,
        /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
        /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
@@ -1044,6 +1071,7 @@ pub(super) struct MonitorRestoreUpdates {
        pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
        pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        pub finalized_claimed_htlcs: Vec<HTLCSource>,
+       pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
        pub funding_broadcastable: Option<Transaction>,
        pub channel_ready: Option<msgs::ChannelReady>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
@@ -1161,6 +1189,10 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
        UnfundedOutboundV1(OutboundV1Channel<SP>),
        UnfundedInboundV1(InboundV1Channel<SP>),
+       #[cfg(dual_funding)]
+       UnfundedOutboundV2(OutboundV2Channel<SP>),
+       #[cfg(dual_funding)]
+       UnfundedInboundV2(InboundV2Channel<SP>),
        Funded(Channel<SP>),
 }
 
@@ -1173,6 +1205,10 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
                        ChannelPhase::Funded(chan) => &chan.context,
                        ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
                        ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+                       #[cfg(dual_funding)]
+                       ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
+                       #[cfg(dual_funding)]
+                       ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
                }
        }
 
@@ -1181,6 +1217,10 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
                        ChannelPhase::Funded(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+                       #[cfg(dual_funding)]
+                       ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
+                       #[cfg(dual_funding)]
+                       ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
                }
        }
 }
@@ -1279,6 +1319,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,
+       monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 
        /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
        /// but our signer (initially) refused to give us a signature, we should retry at some point in
@@ -1743,6 +1784,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
                        monitor_pending_finalized_fulfills: Vec::new(),
+                       monitor_pending_update_adds: Vec::new(),
 
                        signer_pending_commitment_update: false,
                        signer_pending_funding: false,
@@ -1848,7 +1890,10 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                current_chain_height: u32,
                outbound_scid_alias: u64,
                temporary_channel_id: Option<ChannelId>,
-               channel_type: ChannelTypeFeatures,
+               holder_selected_channel_reserve_satoshis: u64,
+               channel_keys_id: [u8; 32],
+               holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
+               pubkeys: ChannelPublicKeys,
        ) -> Result<ChannelContext<SP>, APIError>
                where
                        ES::Target: EntropySource,
@@ -1859,9 +1904,6 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                let channel_value_satoshis = funding_satoshis;
 
                let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
-               let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
-               let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
-               let pubkeys = holder_signer.pubkeys().clone();
 
                if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
                        return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
@@ -1876,13 +1918,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
                        return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
                }
-               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
-               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-                       // Protocol level safety check in place, although it should never happen because
-                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
-                       return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
-               }
 
+               let channel_type = get_initial_channel_type(&config, their_features);
                debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
 
                let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
@@ -1939,6 +1976,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
                        announcement_sigs_state: AnnouncementSigsState::NotSent,
                        secp_ctx,
+                       // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
                        channel_value_satoshis,
 
                        latest_monitor_update_id: 0,
@@ -1968,10 +2006,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
                        monitor_pending_finalized_fulfills: Vec::new(),
+                       monitor_pending_update_adds: Vec::new(),
 
                        signer_pending_commitment_update: false,
                        signer_pending_funding: false,
 
+                       // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
+                       // when we receive `accept_channel2`.
                        #[cfg(debug_assertions)]
                        holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
                        #[cfg(debug_assertions)]
@@ -1992,6 +2033,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        counterparty_dust_limit_satoshis: 0,
                        holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
                        counterparty_max_htlc_value_in_flight_msat: 0,
+                       // We'll adjust this to include our counterparty's `funding_satoshis` when we
+                       // receive `accept_channel2`.
                        holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
                        counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
                        holder_selected_channel_reserve_satoshis,
@@ -2687,7 +2730,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
                let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
-               cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+               cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
        }
 
        /// Get forwarding information for the counterparty.
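// Illustrative sketch (not part of the patch) of the behavioural change above; it
// omits the fee-estimator clamp that precedes this expression. The buffered feerate
// is meant to sit at least 2530 sat/kWU, or 25%, above the current feerate,
// whichever is greater.
fn dust_buffer_feerate_old(feerate_per_kw: u32) -> u32 {
    let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
    core::cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::MAX))
}
fn dust_buffer_feerate_new(feerate_per_kw: u32) -> u32 {
    let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
    core::cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::MAX))
}
// At 5000 sat/kWU: old = max(2530, 6250) = 6250, new = max(7530, 6250) = 7530.
// The old expression never added the 2530 buffer on top of the current feerate;
// it merely floored the result at a flat 2530.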
@@ -3364,6 +3407,49 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        _ => todo!()
                }
        }
+
+       /// If we receive an error message when attempting to open a channel, it may only be a rejection
+       /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
+       /// downgrade of channel features would be possible so that we can still open the channel.
+       pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
+               &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
+       ) -> Result<(), ()>
+       where
+               F::Target: FeeEstimator
+       {
+               if !self.is_outbound() ||
+                       !matches!(
+                               self.channel_state, ChannelState::NegotiatingFunding(flags)
+                               if flags == NegotiatingFundingFlags::OUR_INIT_SENT
+                       )
+               {
+                       return Err(());
+               }
+               if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
+                       // We've exhausted our options
+                       return Err(());
+               }
+               // We support opening a few different types of channels. Try removing our additional
+               // features one by one until we've either arrived at our default or the counterparty has
+               // accepted one.
+               //
+               // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
+               // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
+               // checks whether the counterparty supports every feature, this would only happen if the
+               // counterparty is advertising the feature, but rejecting channels proposing the feature for
+               // whatever reason.
+               if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
+                       self.channel_type.clear_anchors_zero_fee_htlc_tx();
+                       self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
+                       assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
+               } else if self.channel_type.supports_scid_privacy() {
+                       self.channel_type.clear_scid_privacy();
+               } else {
+                       self.channel_type = ChannelTypeFeatures::only_static_remote_key();
+               }
+               self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
+               Ok(())
+       }
 }
 
 // Internal utility functions for channels
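// Sketch (simplified booleans, not the real `ChannelTypeFeatures` API) of the
// downgrade ladder implemented by `maybe_downgrade_channel_features` above: each
// rejection peels one optional feature, and we give up once only
// `static_remote_key` is left.
struct ExampleType { anchors_zero_fee_htlc_tx: bool, scid_privacy: bool }
fn downgrade(t: &mut ExampleType) -> Result<(), ()> {
    if !t.anchors_zero_fee_htlc_tx && !t.scid_privacy {
        return Err(()); // already at plain `static_remote_key`: options exhausted
    }
    if t.anchors_zero_fee_htlc_tx {
        t.anchors_zero_fee_htlc_tx = false; // the real code also resets the feerate here
    } else {
        t.scid_privacy = false;
    }
    Ok(())
}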
@@ -3415,6 +3501,7 @@ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channe
 ///
 /// This is used both for outbound and inbound channels and has lower bound
 /// of `dust_limit_satoshis`.
+#[cfg(dual_funding)]
 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
        // Fixed at 1% of channel value by spec.
        let (q, _) = channel_value_satoshis.overflowing_div(100);
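// Worked example of the 1% rule above (assuming a 354 sat dust limit, matching the
// `MIN_CHAN_DUST_LIMIT_SATOSHIS` argument passed by the V2 constructors below; a
// full implementation would presumably also cap the reserve at the channel value):
fn example_v2_reserve(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
    core::cmp::max(channel_value_satoshis / 100, dust_limit_satoshis)
}
// example_v2_reserve(1_000_000, 354) == 10_000
// example_v2_reserve(30_000, 354)    == 354   (1% would be 300, below the floor)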
@@ -4030,23 +4117,15 @@ impl<SP: Deref> Channel<SP> where
 
                log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
-               Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
+               Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
        }
 
-       pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-               &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-               create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-       ) -> Result<(), ChannelError>
-       where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-               FE::Target: FeeEstimator, L::Target: Logger,
-       {
+       pub fn update_add_htlc(
+               &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+       ) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
-               // We can't accept HTLCs sent after we've sent a shutdown.
-               if self.context.channel_state.is_local_shutdown_sent() {
-                       pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-               }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
                if self.context.channel_state.is_remote_shutdown_sent() {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4065,7 +4144,6 @@ impl<SP: Deref> Channel<SP> where
                }
 
                let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-               let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
                if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
                        return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
                }
@@ -4094,34 +4172,6 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
-               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                       (0, 0)
-               } else {
-                       let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-                       (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
-                               dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
-               };
-               let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-               if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-                       let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                       on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               }
-
-               let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-               if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-                       let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-                       if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-                                       on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               }
-
                let pending_value_to_self_msat =
                        self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
                let pending_remote_value_msat =
@@ -4155,23 +4205,7 @@ impl<SP: Deref> Channel<SP> where
                } else {
                        0
                };
-               if !self.context.is_outbound() {
-                       // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-                       // the spec because the fee spike buffer requirement doesn't exist on the receiver's
-                       // side, only on the sender's. Note that with anchor outputs we are no longer as
-                       // sensitive to fee spikes, so we need to account for them.
-                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-                       let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-                       if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-                               remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-                       }
-                       if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
-                               // Note that if the pending_forward_status is not updated here, then it's because we're already failing
-                               // the HTLC, i.e. its status is already set to failing.
-                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
-                               pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-                       }
-               } else {
+               if self.context.is_outbound() {
                        // Check that they won't violate our local required channel reserve by adding this HTLC.
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
                        let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
@@ -4199,7 +4233,9 @@ impl<SP: Deref> Channel<SP> where
                        amount_msat: msg.amount_msat,
                        payment_hash: msg.payment_hash,
                        cltv_expiry: msg.cltv_expiry,
-                       state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+                       state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
+                               pending_htlc_status: pending_forward_status
+                       }),
                });
                Ok(())
        }
@@ -4405,13 +4441,13 @@ impl<SP: Deref> Channel<SP> where
                }
 
                for htlc in self.context.pending_inbound_htlcs.iter_mut() {
-                       let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
-                               Some(forward_info.clone())
+                       let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
+                               Some(resolution.clone())
                        } else { None };
-                       if let Some(forward_info) = new_forward {
+                       if let Some(htlc_resolution) = htlc_resolution {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
                                        &htlc.payment_hash, &self.context.channel_id);
-                               htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+                               htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
                                need_commitment = true;
                        }
                }
@@ -4721,6 +4757,7 @@ impl<SP: Deref> Channel<SP> where
 
                log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
                let mut to_forward_infos = Vec::new();
+               let mut pending_update_adds = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut finalized_claimed_htlcs = Vec::new();
                let mut update_fail_htlcs = Vec::new();
@@ -4768,29 +4805,37 @@ impl<SP: Deref> Channel<SP> where
                                        let mut state = InboundHTLCState::Committed;
                                        mem::swap(&mut state, &mut htlc.state);
 
-                                       if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+                                       if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
                                                log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
-                                               htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+                                               htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
                                                require_commitment = true;
-                                       } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
-                                               match forward_info {
-                                                       PendingHTLCStatus::Fail(fail_msg) => {
-                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
-                                                               require_commitment = true;
-                                                               match fail_msg {
-                                                                       HTLCFailureMsg::Relay(msg) => {
-                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
-                                                                               update_fail_htlcs.push(msg)
-                                                                       },
-                                                                       HTLCFailureMsg::Malformed(msg) => {
-                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
-                                                                               update_fail_malformed_htlcs.push(msg)
+                                       } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
+                                               match resolution {
+                                                       InboundHTLCResolution::Resolved { pending_htlc_status } =>
+                                                               match pending_htlc_status {
+                                                                       PendingHTLCStatus::Fail(fail_msg) => {
+                                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
+                                                                               require_commitment = true;
+                                                                               match fail_msg {
+                                                                                       HTLCFailureMsg::Relay(msg) => {
+                                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+                                                                                               update_fail_htlcs.push(msg)
+                                                                                       },
+                                                                                       HTLCFailureMsg::Malformed(msg) => {
+                                                                                               htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+                                                                                               update_fail_malformed_htlcs.push(msg)
+                                                                                       },
+                                                                               }
                                                                        },
+                                                                       PendingHTLCStatus::Forward(forward_info) => {
+                                                                               log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
+                                                                               to_forward_infos.push((forward_info, htlc.htlc_id));
+                                                                               htlc.state = InboundHTLCState::Committed;
+                                                                       }
                                                                }
-                                                       },
-                                                       PendingHTLCStatus::Forward(forward_info) => {
+                                                       InboundHTLCResolution::Pending { update_add_htlc } => {
                                                                log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
-                                                               to_forward_infos.push((forward_info, htlc.htlc_id));
+                                                               pending_update_adds.push(update_add_htlc);
                                                                htlc.state = InboundHTLCState::Committed;
                                                        }
                                                }
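// Minimal sketch of the hand-off pattern introduced here: deferred
// `update_add_htlc`s collected while applying the RAA are appended to
// `monitor_pending_update_adds` (next hunk) and later drained with `mem::swap` in
// `monitor_updating_restored`. `Vec<u64>` stands in for `Vec<msgs::UpdateAddHTLC>`.
fn handoff_demo() {
    let mut pending_update_adds: Vec<u64> = vec![1, 2]; // built while applying the RAA
    let mut monitor_pending: Vec<u64> = Vec::new();     // lives on the ChannelContext
    monitor_pending.append(&mut pending_update_adds);
    let mut drained = Vec::new();
    core::mem::swap(&mut drained, &mut monitor_pending); // done once the monitor unblocks
    assert_eq!(drained, vec![1, 2]);
}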
@@ -4851,6 +4896,8 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
+               self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
+
                if self.context.channel_state.is_monitor_update_in_progress() {
                        // We can't actually generate a new commitment transaction (incl by freeing holding
                        // cells) while we can't update the monitor, so we just return what we have.
@@ -5151,13 +5198,16 @@ impl<SP: Deref> Channel<SP> where
                mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
                let mut finalized_claimed_htlcs = Vec::new();
                mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+               let mut pending_update_adds = Vec::new();
+               mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
 
                if self.context.channel_state.is_peer_disconnected() {
                        self.context.monitor_pending_revoke_and_ack = false;
                        self.context.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
                                raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
-                               accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+                               accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
+                               funding_broadcastable, channel_ready, announcement_sigs
                        };
                }
 
@@ -5179,7 +5229,8 @@ impl<SP: Deref> Channel<SP> where
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
-                       raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+                       raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
+                       pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
                }
        }
 
@@ -5419,7 +5470,7 @@ impl<SP: Deref> Channel<SP> where
 
                let shutdown_msg = self.get_outbound_shutdown();
 
-               let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
+               let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
 
                if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
@@ -6043,6 +6094,86 @@ impl<SP: Deref> Channel<SP> where
                        })
        }
 
+       pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+               &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+       ) -> Result<(), (&'static str, u16)>
+       where
+               F::Target: FeeEstimator,
+               L::Target: Logger
+       {
+               if self.context.channel_state.is_local_shutdown_sent() {
+                       return Err(("Shutdown was already sent", 0x4000|8))
+               }
+
+               let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+               let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+               let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+               let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       (0, 0)
+               } else {
+                       let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+                       (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+                               dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+               };
+               let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+               if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+                       let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+                       if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+                                       on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                               return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+                       }
+               }
+
+               let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+               if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+                       let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+                       if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+                               log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+                                       on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+                               return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+                       }
+               }
+
+               let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+               } else {
+                       0
+               };
+
+               let mut removed_outbound_total_msat = 0;
+               for ref htlc in self.context.pending_outbound_htlcs.iter() {
+                       if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                               removed_outbound_total_msat += htlc.amount_msat;
+                       } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+                               removed_outbound_total_msat += htlc.amount_msat;
+                       }
+               }
+
+               let pending_value_to_self_msat =
+                       self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+               let pending_remote_value_msat =
+                       self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+               if !self.context.is_outbound() {
+                       // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+                       // the spec because the fee spike buffer requirement doesn't exist on the receiver's
+                       // side, only on the sender's. Note that with anchor outputs we are no longer as
+                       // sensitive to fee spikes, so we need to account for them.
+                       let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+                       let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+                       if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+                       }
+                       if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+                               log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+                               return Err(("Fee spike buffer violation", 0x1000|7));
+                       }
+               }
+
+               Ok(())
+       }
+
        pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
                self.context.cur_holder_commitment_transaction_number + 1
        }
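// Worked numeric sketch of the dust-exposure gate in `can_accept_incoming_htlc`
// above. The 663-weight figure is the approximate non-anchor HTLC-timeout
// transaction weight and the 546 sat counterparty dust limit is illustrative.
fn timeout_dust_threshold_sats(dust_buffer_feerate: u64, counterparty_dust_limit_sats: u64) -> u64 {
    const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
    dust_buffer_feerate * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counterparty_dust_limit_sats
}
// timeout_dust_threshold_sats(7_530, 546) == 5_538: an inbound HTLC below roughly
// 5_538 sats counts its full value toward `on_counterparty_tx_dust_exposure_msat`,
// and the add is rejected with 0x1000|7 once the running total would exceed
// `get_max_dust_htlc_exposure_msat`.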
@@ -7165,7 +7296,17 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        where ES::Target: EntropySource,
              F::Target: FeeEstimator
        {
-               let channel_type = Self::get_initial_channel_type(&config, their_features);
+               let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+               if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+                       // Protocol level safety check in place, although it should never happen because
+                       // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+                       return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
+                               implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+               }
+
+               let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+               let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+               let pubkeys = holder_signer.pubkeys().clone();
 
                let chan = Self {
                        context: ChannelContext::new_for_outbound_channel(
@@ -7181,7 +7322,10 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                current_chain_height,
                                outbound_scid_alias,
                                temporary_channel_id,
-                               channel_type,
+                               holder_selected_channel_reserve_satoshis,
+                               channel_keys_id,
+                               holder_signer,
+                               pubkeys,
                        )?,
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
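// Hedged sketch of the reserve check moved into `OutboundV1Channel::new` above;
// the formula and the 1_000 sat floor (`MIN_THEIR_CHAN_RESERVE_SATOSHIS`) reflect
// the usual behaviour of `get_holder_selected_channel_reserve_satoshis` but are
// assumptions here, not something this patch shows.
fn example_holder_reserve(channel_value_satoshis: u64, reserve_proportional_millionths: u64) -> u64 {
    let calculated = channel_value_satoshis.saturating_mul(reserve_proportional_millionths) / 1_000_000;
    core::cmp::min(channel_value_satoshis, core::cmp::max(calculated, 1_000))
}
// With the floor at 1_000 sats the reserve can only fall below
// `MIN_CHAN_DUST_LIMIT_SATOSHIS` if these constants were ever changed, which is why
// the check above is described as a "should never happen" safety net.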
@@ -7279,29 +7423,6 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                Ok(funding_created)
        }
 
-       fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
-               // The default channel type (ie the first one we try) depends on whether the channel is
-               // public - if it is, we just go with `only_static_remotekey` as it's the only option
-               // available. If it's private, we first try `scid_privacy` as it provides better privacy
-               // with no other changes, and fall back to `only_static_remotekey`.
-               let mut ret = ChannelTypeFeatures::only_static_remote_key();
-               if !config.channel_handshake_config.announced_channel &&
-                       config.channel_handshake_config.negotiate_scid_privacy &&
-                       their_features.supports_scid_privacy() {
-                       ret.set_scid_privacy_required();
-               }
-
-               // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
-               // set it now. If they don't understand it, we'll fall back to our default of
-               // `only_static_remotekey`.
-               if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
-                       their_features.supports_anchors_zero_fee_htlc_tx() {
-                       ret.set_anchors_zero_fee_htlc_tx_required();
-               }
-
-               ret
-       }
-
        /// If we receive an error message, it may only be a rejection of the channel type we tried,
        /// not of our ability to open any channel at all. Thus, on error, we should first call this
        /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
@@ -7311,37 +7432,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
        where
                F::Target: FeeEstimator
        {
-               if !self.context.is_outbound() ||
-                       !matches!(
-                               self.context.channel_state, ChannelState::NegotiatingFunding(flags)
-                               if flags == NegotiatingFundingFlags::OUR_INIT_SENT
-                       )
-               {
-                       return Err(());
-               }
-               if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
-                       // We've exhausted our options
-                       return Err(());
-               }
-               // We support opening a few different types of channels. Try removing our additional
-               // features one by one until we've either arrived at our default or the counterparty has
-               // accepted one.
-               //
-               // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
-               // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
-               // checks whether the counterparty supports every feature, this would only happen if the
-               // counterparty is advertising the feature, but rejecting channels proposing the feature for
-               // whatever reason.
-               if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
-                       self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
-                       self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
-                       assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
-               } else if self.context.channel_type.supports_scid_privacy() {
-                       self.context.channel_type.clear_scid_privacy();
-               } else {
-                       self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
-               }
-               self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
+               self.context.maybe_downgrade_channel_features(fee_estimator)?;
                Ok(self.get_open_channel(chain_hash))
        }
 
@@ -7913,6 +8004,130 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
        }
 }
 
+// A not-yet-funded outbound (from holder) channel using V2 channel establishment.
+#[cfg(dual_funding)]
+pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
+       pub context: ChannelContext<SP>,
+       pub unfunded_context: UnfundedChannelContext,
+       #[cfg(dual_funding)]
+       pub dual_funding_context: DualFundingChannelContext,
+}
+
+#[cfg(dual_funding)]
+impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
+       pub fn new<ES: Deref, F: Deref>(
+               fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+               counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
+               user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
+               funding_confirmation_target: ConfirmationTarget,
+       ) -> Result<OutboundV2Channel<SP>, APIError>
+       where ES::Target: EntropySource,
+             F::Target: FeeEstimator,
+       {
+               let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
+               let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
+               let pubkeys = holder_signer.pubkeys().clone();
+
+               let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
+
+               let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
+                       funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+
+               let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
+               let funding_tx_locktime = current_chain_height;
+
+               let chan = Self {
+                       context: ChannelContext::new_for_outbound_channel(
+                               fee_estimator,
+                               entropy_source,
+                               signer_provider,
+                               counterparty_node_id,
+                               their_features,
+                               funding_satoshis,
+                               0,
+                               user_id,
+                               config,
+                               current_chain_height,
+                               outbound_scid_alias,
+                               temporary_channel_id,
+                               holder_selected_channel_reserve_satoshis,
+                               channel_keys_id,
+                               holder_signer,
+                               pubkeys,
+                       )?,
+                       unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+                       dual_funding_context: DualFundingChannelContext {
+                               our_funding_satoshis: funding_satoshis,
+                               their_funding_satoshis: 0,
+                               funding_tx_locktime,
+                               funding_feerate_sat_per_1000_weight,
+                       }
+               };
+               Ok(chan)
+       }
+
+       /// If we receive an error message, it may only be a rejection of the channel type we tried,
+       /// not of our ability to open any channel at all. Thus, on error, we should first call this
+       /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
+       pub(crate) fn maybe_handle_error_without_close<F: Deref>(
+               &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
+       ) -> Result<msgs::OpenChannelV2, ()>
+       where
+               F::Target: FeeEstimator
+       {
+               self.context.maybe_downgrade_channel_features(fee_estimator)?;
+               Ok(self.get_open_channel_v2(chain_hash))
+       }
+
+       pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
+               if self.context.have_received_message() {
+                       debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
+               }
+
+               if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+                       debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
+               }
+
+               let first_per_commitment_point = self.context.holder_signer.as_ref()
+                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
+                               &self.context.secp_ctx);
+               let second_per_commitment_point = self.context.holder_signer.as_ref()
+                       .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
+                               &self.context.secp_ctx);
+               let keys = self.context.get_holder_pubkeys();
+
+               msgs::OpenChannelV2 {
+                       common_fields: msgs::CommonOpenChannelFields {
+                               chain_hash,
+                               temporary_channel_id: self.context.temporary_channel_id.unwrap(),
+                               funding_satoshis: self.context.channel_value_satoshis,
+                               dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+                               max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+                               htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+                               commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
+                               to_self_delay: self.context.get_holder_selected_contest_delay(),
+                               max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+                               funding_pubkey: keys.funding_pubkey,
+                               revocation_basepoint: keys.revocation_basepoint.to_public_key(),
+                               payment_basepoint: keys.payment_point,
+                               delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
+                               htlc_basepoint: keys.htlc_basepoint.to_public_key(),
+                               first_per_commitment_point,
+                               channel_flags: if self.context.config.announced_channel {1} else {0},
+                               shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+                                       Some(script) => script.clone().into_inner(),
+                                       None => Builder::new().into_script(),
+                               }),
+                               channel_type: Some(self.context.channel_type.clone()),
+                       },
+                       funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
+                       second_per_commitment_point,
+                       locktime: self.dual_funding_context.funding_tx_locktime,
+                       require_confirmed_inputs: None,
+               }
+       }
+}
+
 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
 #[cfg(dual_funding)]
 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
@@ -8067,7 +8282,32 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
        }
 }
 
-const SERIALIZATION_VERSION: u8 = 3;
+// Unfunded channel utilities
+
+fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+       // The default channel type (ie the first one we try) depends on whether the channel is
+       // public - if it is, we just go with `only_static_remotekey` as it's the only option
+       // available. If it's private, we first try `scid_privacy` as it provides better privacy
+       // with no other changes, and fall back to `only_static_remotekey`.
+       let mut ret = ChannelTypeFeatures::only_static_remote_key();
+       if !config.channel_handshake_config.announced_channel &&
+               config.channel_handshake_config.negotiate_scid_privacy &&
+               their_features.supports_scid_privacy() {
+               ret.set_scid_privacy_required();
+       }
+
+       // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+       // set it now. If they don't understand it, we'll fall back to our default of
+       // `only_static_remotekey`.
+       if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+               their_features.supports_anchors_zero_fee_htlc_tx() {
+               ret.set_anchors_zero_fee_htlc_tx_required();
+       }
+
+       ret
+}
+
+const SERIALIZATION_VERSION: u8 = 4;
 const MIN_SERIALIZATION_VERSION: u8 = 3;
 
 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
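// Decision sketch for `get_initial_channel_type`, now hoisted to module scope above
// so the V2 channel structs can share it. The booleans are hypothetical config and
// peer-feature inputs, not the real `UserConfig`/`InitFeatures` API.
fn initial_type(announced: bool, negotiate_scid_privacy: bool, peer_scid_privacy: bool,
        negotiate_anchors: bool, peer_anchors: bool) -> (bool, bool) {
    let scid_privacy = !announced && negotiate_scid_privacy && peer_scid_privacy;
    let anchors_zero_fee_htlc_tx = negotiate_anchors && peer_anchors;
    // (false, false) corresponds to the plain `only_static_remote_key` baseline.
    (scid_privacy, anchors_zero_fee_htlc_tx)
}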
@@ -8129,7 +8369,18 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
                // called.
 
-               write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+               let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
+                       InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
+                               InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
+                               matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
+                       },
+                       _ => false,
+               }) {
+                       SERIALIZATION_VERSION
+               } else {
+                       MIN_SERIALIZATION_VERSION
+               };
+               write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
 
                // `user_id` used to be a single u64 value. In order to remain backwards compatible with
                // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
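The hunk above writes `SERIALIZATION_VERSION` (4) only when some inbound HTLC in an `Awaiting*` state still carries an `InboundHTLCResolution::Pending`; otherwise it sticks with `MIN_SERIALIZATION_VERSION` (3) so previously released readers can still deserialize the channel. A generic sketch of that pattern (illustrative names, not LDK code):

    /// Bump the on-disk version only when data the old format cannot represent is present.
    fn version_to_write(has_new_only_data: bool, current: u8, minimum: u8) -> u8 {
        if has_new_only_data { current } else { minimum }
    }

    fn main() {
        assert_eq!(version_to_write(false, 4, 3), 3); // old readers keep working
        assert_eq!(version_to_write(true, 4, 3), 4);  // new-only data forces the bump
    }
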
@@ -8185,13 +8436,29 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        htlc.payment_hash.write(writer)?;
                        match &htlc.state {
                                &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
-                               &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
+                               &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
                                        1u8.write(writer)?;
-                                       htlc_state.write(writer)?;
+                                       if version_to_write <= 3 {
+                                               if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+                                                       pending_htlc_status.write(writer)?;
+                                               } else {
+                                                       panic!();
+                                               }
+                                       } else {
+                                               htlc_resolution.write(writer)?;
+                                       }
                                },
-                               &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
+                               &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
                                        2u8.write(writer)?;
-                                       htlc_state.write(writer)?;
+                                       if version_to_write <= 3 {
+                                               if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+                                                       pending_htlc_status.write(writer)?;
+                                               } else {
+                                                       panic!();
+                                               }
+                                       } else {
+                                               htlc_resolution.write(writer)?;
+                                       }
                                },
                                &InboundHTLCState::Committed => {
                                        3u8.write(writer)?;
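The `panic!()` branches above are unreachable in practice: `version_to_write` only stays at 3 when every `Awaiting*` HTLC holds a `Resolved` resolution, so the legacy encoding (a bare `PendingHTLCStatus`) always suffices on that path, while version 4 defers to the resolution's own `Writeable` impl. A simplified, self-contained sketch of the same dual-format write (stand-in types and an arbitrary tag byte, not LDK's actual encoding):

    // Stand-in for InboundHTLCResolution.
    enum Resolution { Resolved(u8), Pending(u8) }

    fn write_resolution(version: u8, res: &Resolution, out: &mut Vec<u8>) {
        if version <= 3 {
            match res {
                // Legacy format: only the resolved status is written.
                Resolution::Resolved(status) => out.push(*status),
                // Unreachable when the version was chosen as in the earlier hunk.
                Resolution::Pending(_) => panic!("legacy version chosen despite pending resolution"),
            }
        } else {
            // New format: a tag byte distinguishes the variants for the reader.
            match res {
                Resolution::Resolved(status) => { out.push(0); out.push(*status); }
                Resolution::Pending(msg) => { out.push(1); out.push(*msg); }
            }
        }
    }
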
@@ -8417,6 +8684,11 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 
                let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
 
+               let mut monitor_pending_update_adds = None;
+               if !self.context.monitor_pending_update_adds.is_empty() {
+                       monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
+               }
+
                write_tlv_fields!(writer, {
                        (0, self.context.announcement_sigs, option),
                        // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -8434,6 +8706,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (7, self.context.shutdown_scriptpubkey, option),
                        (8, self.context.blocked_monitor_updates, optional_vec),
                        (9, self.context.target_closing_feerate_sats_per_kw, option),
+                       (10, monitor_pending_update_adds, option), // Added in 0.0.122
                        (11, self.context.monitor_pending_finalized_fulfills, required_vec),
                        (13, self.context.channel_creation_height, required),
                        (15, preimages, required_vec),
@@ -8452,7 +8725,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
+                       // 45 and 47 are reserved for async signing
+                       (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
 
                Ok(())
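One note on the TLV changes above: `monitor_pending_update_adds` lands on even type 10, which LDK's TLV readers treat as required-to-understand, so it is only emitted when the vector is non-empty; older readers therefore only fail on channels that actually have `update_add_htlc`s waiting to be replayed. A small sketch of that conditional-write / absent-means-empty pairing (illustrative helpers, not the LDK macros):

    /// Write side: only emit the field when it carries data.
    fn maybe_pending(pending: &[u8]) -> Option<&[u8]> {
        if pending.is_empty() { None } else { Some(pending) }
    }

    /// Read side: an absent field simply means "nothing pending", mirroring the
    /// `unwrap_or(Vec::new())` where the channel context is rebuilt further down.
    fn read_pending(field: Option<Vec<u8>>) -> Vec<u8> {
        field.unwrap_or(Vec::new())
    }

    fn main() {
        assert!(maybe_pending(&[]).is_none());
        assert_eq!(read_pending(None), Vec::<u8>::new());
    }
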
@@ -8528,8 +8802,22 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                cltv_expiry: Readable::read(reader)?,
                                payment_hash: Readable::read(reader)?,
                                state: match <u8 as Readable>::read(reader)? {
-                                       1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
-                                       2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
+                                       1 => {
+                                               let resolution = if ver <= 3 {
+                                                       InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+                                               } else {
+                                                       Readable::read(reader)?
+                                               };
+                                               InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
+                                       },
+                                       2 => {
+                                               let resolution = if ver <= 3 {
+                                                       InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+                                               } else {
+                                                       Readable::read(reader)?
+                                               };
+                                               InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
+                                       },
                                        3 => InboundHTLCState::Committed,
                                        4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
                                        _ => return Err(DecodeError::InvalidValue),
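On the read side the stream version drives the same split: a version-3 stream only ever stored a bare `PendingHTLCStatus`, which gets wrapped into `InboundHTLCResolution::Resolved`, while a version-4 stream stores the resolution directly. A simplified reader matching the write sketch above (same stand-in enum and tag byte):

    enum Resolution { Resolved(u8), Pending(u8) }

    fn read_resolution(version: u8, bytes: &mut impl Iterator<Item = u8>) -> Option<Resolution> {
        if version <= 3 {
            // Legacy stream: the payload is the resolved status itself.
            bytes.next().map(Resolution::Resolved)
        } else {
            // New stream: a tag byte distinguishes the variants.
            match bytes.next()? {
                0 => bytes.next().map(Resolution::Resolved),
                1 => bytes.next().map(Resolution::Pending),
                _ => None, // unknown tag
            }
        }
    }
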
@@ -8746,6 +9034,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
 
                let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+               let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
 
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
@@ -8758,6 +9047,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (7, shutdown_scriptpubkey, option),
                        (8, blocked_monitor_updates, optional_vec),
                        (9, target_closing_feerate_sats_per_kw, option),
+                       (10, monitor_pending_update_adds, option), // Added in 0.0.122
                        (11, monitor_pending_finalized_fulfills, optional_vec),
                        (13, channel_creation_height, option),
                        (15, preimages_opt, optional_vec),
@@ -8776,7 +9066,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
-                       (45, local_initiated_shutdown, option),
+                       // 45 and 47 are reserved for async signing
+                       (49, local_initiated_shutdown, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -8929,6 +9220,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                monitor_pending_forwards,
                                monitor_pending_failures,
                                monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
+                               monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
 
                                signer_pending_commitment_update: false,
                                signer_pending_funding: false,