Merge pull request #3021 from TheBlueMatt/2024-04-drop-blocked-completed-updates
Author:     Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
AuthorDate: Thu, 25 Apr 2024 18:33:18 +0000 (11:33 -0700)
Commit:     GitHub <noreply@github.com>
CommitDate: Thu, 25 Apr 2024 18:33:18 +0000 (11:33 -0700)
Drop completed blocked `ChannelMonitorUpdate`s on startup

lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/monitor_tests.rs

index c02659cd0b01cc83548b988c362929074c56808d,0608db10da4054dce5c96fc0dba5160f648a1376..66301c95bd6d378d828003ae9260a54eb9ade578
@@@ -50,6 -50,7 +50,6 @@@ use crate::util::scid_utils::scid_from_
  use crate::io;
  use crate::prelude::*;
  use core::{cmp,mem,fmt};
 -use core::convert::TryInto;
  use core::ops::Deref;
  #[cfg(any(test, fuzzing, debug_assertions))]
  use crate::sync::Mutex;
@@@ -103,38 -104,10 +103,38 @@@ enum InboundHTLCRemovalReason 
        Fulfill(PaymentPreimage),
  }
  
 +/// Represents the resolution status of an inbound HTLC.
 +#[derive(Clone)]
 +enum InboundHTLCResolution {
 +      /// Resolved implies the action we must take with the inbound HTLC has already been determined,
 +      /// i.e., we already know whether it must be failed back or forwarded.
 +      //
 +      // TODO: Once this variant is removed, we should also clean up
 +      // [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
 +      Resolved {
 +              pending_htlc_status: PendingHTLCStatus,
 +      },
 +      /// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
 +      /// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
 +      /// nodes for the state update in which it was proposed.
 +      Pending {
 +              update_add_htlc: msgs::UpdateAddHTLC,
 +      },
 +}
 +
 +impl_writeable_tlv_based_enum!(InboundHTLCResolution,
 +      (0, Resolved) => {
 +              (0, pending_htlc_status, required),
 +      },
 +      (2, Pending) => {
 +              (0, update_add_htlc, required),
 +      };
 +);
 +
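To make the new variant split concrete, here is a minimal standalone sketch of the two resolution paths (with placeholder stand-ins for `PendingHTLCStatus` and `msgs::UpdateAddHTLC`; the handler and prints are illustrative, not the crate's actual processing path):

```rust
#[derive(Debug)]
struct PendingHTLCStatus; // stand-in for the LDK enum of the same name
#[derive(Debug)]
struct UpdateAddHTLC; // stand-in for msgs::UpdateAddHTLC

enum InboundHTLCResolution {
    Resolved { pending_htlc_status: PendingHTLCStatus },
    Pending { update_add_htlc: UpdateAddHTLC },
}

// Invoked once a `revoke_and_ack` has been processed by both nodes for the
// state update in which the HTLC was proposed.
fn on_fully_committed(res: InboundHTLCResolution) {
    match res {
        // The forward-or-fail decision was already made when the HTLC arrived.
        InboundHTLCResolution::Resolved { pending_htlc_status } =>
            println!("apply existing decision: {:?}", pending_htlc_status),
        // Decision was deferred; process the original update_add_htlc now.
        InboundHTLCResolution::Pending { update_add_htlc } =>
            println!("resolve now: {:?}", update_add_htlc),
    }
}

fn main() {
    on_fully_committed(InboundHTLCResolution::Pending { update_add_htlc: UpdateAddHTLC });
}
```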
  enum InboundHTLCState {
        /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
        /// update_add_htlc message for this HTLC.
 -      RemoteAnnounced(PendingHTLCStatus),
 +      RemoteAnnounced(InboundHTLCResolution),
        /// Included in a received commitment_signed message (implying we've
        /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
        /// state (see the example below). We have not yet included this HTLC in a
        /// Implies AwaitingRemoteRevoke.
        ///
        /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
 -      AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
 +      AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
        /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
        /// We have also included this HTLC in our latest commitment_signed and are now just waiting
        /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
        /// channel (before it can then get forwarded and/or removed).
        /// Implies AwaitingRemoteRevoke.
 -      AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
 +      AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
        Committed,
        /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
        /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
        LocalRemoved(InboundHTLCRemovalReason),
  }
  
 +/// Exposes the state of pending inbound HTLCs.
 +///
 +/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
 +/// through the following states in the state machine:
 +/// - Announced for addition by the originating node through the update_add_htlc message.
 +/// - Added to the commitment transaction of the receiving node and originating node in turn
 +///   through the exchange of commitment_signed and revoke_and_ack messages.
 +/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
 +///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
 +/// - Removed from the commitment transaction of the originating node and receiving node in turn
 +///   through the exchange of commitment_signed and revoke_and_ack messages.
 +///
 +/// This can be used to inspect which message, if any, an HTLC is currently waiting on to advance its state.
 +#[derive(Clone, Debug, PartialEq)]
 +pub enum InboundHTLCStateDetails {
 +      /// We have added this HTLC to our commitment transaction by receiving commitment_signed and
 +      /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
 +      /// before this HTLC is included on the remote commitment transaction.
 +      AwaitingRemoteRevokeToAdd,
 +      /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
 +      /// and is included in both commitment transactions.
 +      ///
 +      /// This HTLC can now safely be forwarded or claimed as a payment by us. The HTLC will
 +      /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
 +      /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
 +      /// payment, it will only be claimed together with other required parts.
 +      Committed,
 +      /// We have received the preimage for this HTLC and it is being removed by fulfilling it with
 +      /// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
 +      /// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
 +      /// commitment transaction after update_fulfill_htlc.
 +      AwaitingRemoteRevokeToRemoveFulfill,
 +      /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
 +      /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
 +      /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
 +      /// transaction.
 +      AwaitingRemoteRevokeToRemoveFail,
 +}
 +
 +impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
 +      fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
 +              match state {
 +                      InboundHTLCState::RemoteAnnounced(_) => None,
 +                      InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
 +                              Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
 +                      InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
 +                              Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
 +                      InboundHTLCState::Committed =>
 +                              Some(InboundHTLCStateDetails::Committed),
 +                      InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
 +                              Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
 +                      InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
 +                              Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
 +                      InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
 +                              Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
 +              }
 +      }
 +}
 +
 +impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
 +      (0, AwaitingRemoteRevokeToAdd) => {},
 +      (2, Committed) => {},
 +      (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
 +      (6, AwaitingRemoteRevokeToRemoveFail) => {};
 +);
 +
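As a consumer-side sketch of the new public enum (mirrored locally so the snippet stands alone), one can map each state to the peer message, if any, that the HTLC is waiting on:

```rust
#[derive(Clone, Debug, PartialEq)]
enum InboundHTLCStateDetails {
    AwaitingRemoteRevokeToAdd,
    Committed,
    AwaitingRemoteRevokeToRemoveFulfill,
    AwaitingRemoteRevokeToRemoveFail,
}

fn awaited_message(state: &InboundHTLCStateDetails) -> &'static str {
    match state {
        // Not yet on the remote commitment tx; their revoke_and_ack completes the add.
        InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd => "revoke_and_ack (add)",
        // On both commitment txs; progress depends on resolution, not on a message.
        InboundHTLCStateDetails::Committed => "none",
        // Removal in flight; their revoke_and_ack completes the removal.
        InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill
            | InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail =>
            "revoke_and_ack (remove)",
    }
}
```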
  struct InboundHTLCOutput {
        htlc_id: u64,
        amount_msat: u64,
        state: InboundHTLCState,
  }
  
 +/// Exposes details around pending inbound HTLCs.
 +#[derive(Clone, Debug, PartialEq)]
 +pub struct InboundHTLCDetails {
 +      /// The HTLC ID.
 +      /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
 +      /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
 +      /// and not part of any commitment transaction.
 +      pub htlc_id: u64,
 +      /// The amount in msat.
 +      pub amount_msat: u64,
 +      /// The block height at which this HTLC expires.
 +      pub cltv_expiry: u32,
 +      /// The payment hash.
 +      pub payment_hash: PaymentHash,
 +      /// The state of the HTLC in the state machine.
 +      ///
 +      /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
 +      /// waiting for to advance to the next state.
 +      ///
 +      /// See [`InboundHTLCStateDetails`] for information on the specific states.
 +      ///
 +      /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
 +      /// states may result in `None` here.
 +      pub state: Option<InboundHTLCStateDetails>,
 +      /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
 +      /// from the local commitment transaction and added to the commitment transaction fee.
 +      /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
 +      /// transactions as well.
 +      ///
 +      /// When the local commitment transaction is broadcast as part of a unilateral closure,
 +      /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
 +      /// fee.
 +      ///
 +      /// Note that dust limits are specific to each party. An HTLC can be dust for the local
 +      /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
 +      pub is_dust: bool,
 +}
 +
 +impl_writeable_tlv_based!(InboundHTLCDetails, {
 +      (0, htlc_id, required),
 +      (2, amount_msat, required),
 +      (4, cltv_expiry, required),
 +      (6, payment_hash, required),
 +      (7, state, upgradable_option),
 +      (8, is_dust, required),
 +});
 +
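Because `is_dust` marks value that would be burned as transaction fees on a unilateral close, a caller can tally worst-case inbound dust exposure as below (struct trimmed to the fields used; a sketch, not an LDK API):

```rust
struct InboundHTLCDetails {
    amount_msat: u64,
    is_dust: bool,
}

/// Total inbound HTLC value folded into the commitment tx fee (and thus
/// unclaimable) if our local commitment transaction hit the chain now.
fn inbound_dust_exposure_msat(htlcs: &[InboundHTLCDetails]) -> u64 {
    htlcs.iter().filter(|h| h.is_dust).map(|h| h.amount_msat).sum()
}
```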
  #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
  enum OutboundHTLCState {
        /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
        AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
  }
  
 +/// Exposes the state of pending outbound HTLCs.
 +///
 +/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
 +/// through the following states in the state machine:
 +/// - Announced for addition by the originating node through the update_add_htlc message.
 +/// - Added to the commitment transaction of the receiving node and originating node in turn
 +///   through the exchange of commitment_signed and revoke_and_ack messages.
 +/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
 +///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
 +/// - Removed from the commitment transaction of the originating node and receiving node in turn
 +///   through the exchange of commitment_signed and revoke_and_ack messages.
 +///
 +/// This can be used to inspect which message, if any, an HTLC is currently waiting on to advance its state.
 +#[derive(Clone, Debug, PartialEq)]
 +pub enum OutboundHTLCStateDetails {
 +      /// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
 +      /// on the remote's commitment transaction after update_add_htlc.
 +      AwaitingRemoteRevokeToAdd,
 +      /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
 +      /// and receiving revoke_and_ack in return.
 +      ///
 +      /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
 +      /// unilaterally close the channel due to a timeout with an uncooperative remote node.
 +      Committed,
 +      /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
 +      /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
 +      /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
 +      /// for the removal from its commitment transaction.
 +      AwaitingRemoteRevokeToRemoveSuccess,
 +      /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
 +      /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
 +      /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
 +      /// for the removal from its commitment transaction.
 +      AwaitingRemoteRevokeToRemoveFailure,
 +}
 +
 +impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
 +      fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
 +              match state {
 +                      OutboundHTLCState::LocalAnnounced(_) =>
 +                              OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
 +                      OutboundHTLCState::Committed =>
 +                              OutboundHTLCStateDetails::Committed,
 +                      // RemoteRemoved states are ignored as the state is transient and the remote has not committed to
 +                      // the state yet.
 +                      OutboundHTLCState::RemoteRemoved(_) =>
 +                              OutboundHTLCStateDetails::Committed,
 +                      OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
 +                              OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
 +                      OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
 +                              OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
 +                      OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
 +                              OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
 +                      OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
 +                              OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
 +              }
 +      }
 +}
 +
 +impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
 +      (0, AwaitingRemoteRevokeToAdd) => {},
 +      (2, Committed) => {},
 +      (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
 +      (6, AwaitingRemoteRevokeToRemoveFailure) => {};
 +);
 +
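One consequence of the mapping above, worth noting for API consumers: `RemoteRemoved` is reported as `Committed`, so a fulfilled or failed HTLC keeps showing as committed until the removal is itself committed. A hedged sketch of a check built on that guarantee (enum mirrored locally; the helper is hypothetical):

```rust
#[derive(PartialEq)]
enum OutboundHTLCStateDetails {
    AwaitingRemoteRevokeToAdd,
    Committed,
    AwaitingRemoteRevokeToRemoveSuccess,
    AwaitingRemoteRevokeToRemoveFailure,
}

// True once every outbound HTLC is at least irrevocably added, i.e. none is
// still waiting on the remote revocation that completes the add.
fn no_pending_adds(states: &[OutboundHTLCStateDetails]) -> bool {
    states.iter().all(|s| *s != OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd)
}
```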
  #[derive(Clone)]
  #[cfg_attr(test, derive(Debug, PartialEq))]
  enum OutboundHTLCOutcome {
@@@ -443,58 -237,6 +443,58 @@@ struct OutboundHTLCOutput 
        skimmed_fee_msat: Option<u64>,
  }
  
 +/// Exposes details around pending outbound HTLCs.
 +#[derive(Clone, Debug, PartialEq)]
 +pub struct OutboundHTLCDetails {
 +      /// The HTLC ID.
 +      /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
 +      /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
 +      /// and not part of any commitment transaction.
 +      ///
 +      /// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
 +      pub htlc_id: Option<u64>,
 +      /// The amount in msat.
 +      pub amount_msat: u64,
 +      /// The block height at which this HTLC expires.
 +      pub cltv_expiry: u32,
 +      /// The payment hash.
 +      pub payment_hash: PaymentHash,
 +      /// The state of the HTLC in the state machine.
 +      ///
 +      /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
 +      /// waiting for to advance to the next state.
 +      ///
 +      /// See [`OutboundHTLCStateDetails`] for information on the specific states.
 +      ///
 +      /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
 +      /// states may result in `None` here.
 +      pub state: Option<OutboundHTLCStateDetails>,
 +      /// The extra fee being skimmed off the top of this HTLC.
 +      pub skimmed_fee_msat: Option<u64>,
 +      /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
 +      /// from the local commitment transaction and added to the commitment transaction fee.
 +      /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
 +      /// transactions as well.
 +      ///
 +      /// When the local commitment transaction is broadcast as part of a unilateral closure,
 +      /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
 +      /// fee.
 +      ///
 +      /// Note that dust limits are specific to each party. An HTLC can be dust for the local
 +      /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
 +      pub is_dust: bool,
 +}
 +
 +impl_writeable_tlv_based!(OutboundHTLCDetails, {
 +      (0, htlc_id, required),
 +      (2, amount_msat, required),
 +      (4, cltv_expiry, required),
 +      (6, payment_hash, required),
 +      (7, state, upgradable_option),
 +      (8, skimmed_fee_msat, required),
 +      (10, is_dust, required),
 +});
 +
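Since `htlc_id` is `None` exactly while an outbound HTLC still awaits a remote revocation (per the field docs above), committed versus held-back value can be split with a fold; a minimal sketch assuming a slice of these structs:

```rust
struct OutboundHTLCDetails {
    htlc_id: Option<u64>,
    amount_msat: u64,
}

/// Returns (msat already added to a commitment tx, msat still awaiting a
/// remote revocation before being added).
fn committed_vs_pending_msat(htlcs: &[OutboundHTLCDetails]) -> (u64, u64) {
    htlcs.iter().fold((0u64, 0u64), |(committed, pending), h| match h.htlc_id {
        Some(_) => (committed + h.amount_msat, pending),
        None => (committed, pending + h.amount_msat),
    })
}
```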
  /// See AwaitingRemoteRevoke ChannelState for more info
  #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
  enum HTLCUpdateAwaitingACK {
  }
  
  macro_rules! define_state_flags {
 -      ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
 +      ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
                #[doc = $flag_type_doc]
                #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
                struct $flag_type(u32);
  
                        #[allow(unused)]
                        fn is_empty(&self) -> bool { self.0 == 0 }
 -
                        #[allow(unused)]
                        fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
 +                      #[allow(unused)]
 +                      fn set(&mut self, flag: Self) { *self |= flag }
 +                      #[allow(unused)]
 +                      fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
                }
  
 -              impl core::ops::Not for $flag_type {
 -                      type Output = Self;
 -                      fn not(self) -> Self::Output { Self(!self.0) }
 -              }
 +              $(
 +                      define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
 +              )*
 +
                impl core::ops::BitOr for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
        ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
        };
 +      ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
 +              impl $flag_type {
 +                      #[allow(unused)]
 +                      fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
 +                      #[allow(unused)]
 +                      fn $set(&mut self) { self.set($flag_type::new() | $flag) }
 +                      #[allow(unused)]
 +                      fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
 +              }
 +      };
        ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
 +
 +              define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
 +                      is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
 +              define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
 +                      is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
 +              define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
 +                      is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
 +              define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
 +                      is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
 +
                impl core::ops::BitOr<FundedStateFlags> for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
@@@ -652,19 -371,15 +652,19 @@@ define_state_flags!
        "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
        FundedStateFlags, [
                ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
 -                      until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
 +                      until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
 +                      is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
                ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
                        somewhere and we should pause sending any outbound messages until they've managed to \
 -                      complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
 +                      complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
 +                      is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
                ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
                        any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
 -                      message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
 +                      message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
 +                      is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
                ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
 -                      the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
 +                      the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
 +                      is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
        ]
  );
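The macro change above replaces raw bit twiddling at call sites with generated per-flag accessors. A rough hand-expansion for a single flag (flag value illustrative, not the crate's actual constant):

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
struct FundedStateFlags(u32);

impl FundedStateFlags {
    const PEER_DISCONNECTED: FundedStateFlags = FundedStateFlags(1 << 0); // illustrative bit

    fn new() -> Self { FundedStateFlags(0) }
    // What `define_state_flags!` now generates per flag:
    fn is_peer_disconnected(&self) -> bool {
        self.0 & Self::PEER_DISCONNECTED.0 == Self::PEER_DISCONNECTED.0
    }
    fn set_peer_disconnected(&mut self) { self.0 |= Self::PEER_DISCONNECTED.0; }
    fn clear_peer_disconnected(&mut self) -> Self {
        self.0 &= !Self::PEER_DISCONNECTED.0;
        *self
    }
}

fn main() {
    let mut flags = FundedStateFlags::new();
    flags.set_peer_disconnected();
    assert!(flags.is_peer_disconnected());
    assert!(!flags.clear_peer_disconnected().is_peer_disconnected());
}
```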
  
@@@ -672,9 -387,9 +672,9 @@@ define_state_flags!
        "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
        NegotiatingFundingFlags, [
                ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
 -                      OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
 +                      OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
                ("Indicates we have received their `open_channel`/`accept_channel` message.",
 -                      THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
 +                      THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
        ]
  );
  
@@@ -683,16 -398,13 +683,16 @@@ define_state_flags!
        FUNDED_STATE, AwaitingChannelReadyFlags, [
                ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
 -                      THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
 +                      THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
 +                      is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
                ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
 -                      OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
 +                      OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
 +                      is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
                ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
                        is being held until all channels in the batch have received `funding_signed` and have \
 -                      their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
 +                      their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
 +                      is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
        ]
  );
  
@@@ -703,13 -415,10 +703,13 @@@ define_state_flags!
                        `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
                        messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
                        implicit ACK, so instead we have to hold them away temporarily to be sent later.",
 -                      AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
 +                      AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
 +                      is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
        ]
  );
  
 +// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
 +// into account when introducing new states and update `test_channel_state_order` accordingly.
  #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
  enum ChannelState {
        /// We are negotiating the parameters required for the channel prior to funding it.
  }
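The ordering note matters because `ChannelState` derives `PartialOrd`, and derived comparison follows declaration order. Simplified stand-in (the real variants carry flag payloads) showing the property `test_channel_state_order` pins down:

```rust
#[derive(PartialEq, PartialOrd)]
enum ChannelStateOrder {
    NegotiatingFunding,
    FundingNegotiated,
    AwaitingChannelReady,
    ChannelReady,
    ShutdownComplete,
}

fn main() {
    // Reordering variants silently changes these comparisons.
    assert!(ChannelStateOrder::NegotiatingFunding < ChannelStateOrder::FundingNegotiated);
    assert!(ChannelStateOrder::AwaitingChannelReady < ChannelStateOrder::ChannelReady);
    assert!(ChannelStateOrder::ChannelReady < ChannelStateOrder::ShutdownComplete);
}
```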
  
  macro_rules! impl_state_flag {
 -      ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
 +      ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
                #[allow(unused)]
                fn $get(&self) -> bool {
                        match self {
                                $(
 -                                      ChannelState::$state(flags) => flags.is_set($state_flag.into()),
 +                                      ChannelState::$state(flags) => flags.$get(),
                                )*
                                _ => false,
                        }
                fn $set(&mut self) {
                        match self {
                                $(
 -                                      ChannelState::$state(flags) => *flags |= $state_flag,
 +                                      ChannelState::$state(flags) => flags.$set(),
                                )*
                                _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
                        }
                fn $clear(&mut self) {
                        match self {
                                $(
 -                                      ChannelState::$state(flags) => *flags &= !($state_flag),
 +                                      ChannelState::$state(flags) => { let _ = flags.$clear(); },
                                )*
                                _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
                        }
                }
        };
 -      ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
 -              impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
 +      ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
 +              impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
        };
 -      ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
 -              impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
 +      ($get: ident, $set: ident, $clear: ident, $state: ident) => {
 +              impl_state_flag!($get, $set, $clear, [$state]);
        };
  }
  
@@@ -814,27 -523,35 +814,27 @@@ impl ChannelState 
                }
        }
  
 -      fn should_force_holding_cell(&self) -> bool {
 +      fn can_generate_new_commitment(&self) -> bool {
                match self {
                        ChannelState::ChannelReady(flags) =>
 -                              flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
 -                                      flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
 -                                      flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
 +                              !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
 +                                      !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
 +                                      !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
                        _ => {
 -                              debug_assert!(false, "The holding cell is only valid within ChannelReady");
 +                              debug_assert!(false, "Can only generate new commitment within ChannelReady");
                                false
                        },
                }
        }
  
 -      impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
 -              FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
 -      impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
 -              FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
 -      impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
 -              FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
 -      impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
 -              FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
 -      impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
 -              AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
 -      impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
 -              AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
 -      impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
 -              AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
 -      impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
 -              ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
 +      impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
 +      impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
 +      impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
 +      impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
 +      impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
 +      impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
 +      impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
 +      impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
  }
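The `should_force_holding_cell` → `can_generate_new_commitment` rename flips polarity so call sites read positively. In plain boolean terms (a sketch with the three flags as bare bools):

```rust
// Within `ChannelReady`, a fresh commitment_signed can only be built when
// none of these conditions hold.
fn can_generate_new_commitment(
    awaiting_remote_revoke: bool,
    monitor_update_in_progress: bool,
    peer_disconnected: bool,
) -> bool {
    !awaiting_remote_revoke && !monitor_update_in_progress && !peer_disconnected
}

// The removed helper was exactly the negation:
fn should_force_holding_cell(a: bool, m: bool, p: bool) -> bool {
    !can_generate_new_commitment(a, m, p)
}
```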
  
  pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
@@@ -1071,7 -788,6 +1071,7 @@@ pub(super) struct MonitorRestoreUpdate
        pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
        pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        pub finalized_claimed_htlcs: Vec<HTLCSource>,
 +      pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
        pub funding_broadcastable: Option<Transaction>,
        pub channel_ready: Option<msgs::ChannelReady>,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
@@@ -1100,7 -816,7 +1100,7 @@@ pub(super) struct ReestablishResponses 
  pub(crate) struct ShutdownResult {
        pub(crate) closure_reason: ClosureReason,
        /// A channel monitor update to apply.
 -      pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
 +      pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
        /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
        pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
        /// An unbroadcasted batch funding transaction id. The closure of this channel should be
@@@ -1189,10 -905,6 +1189,10 @@@ impl_writeable_tlv_based!(PendingChanne
  pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
        UnfundedOutboundV1(OutboundV1Channel<SP>),
        UnfundedInboundV1(InboundV1Channel<SP>),
 +      #[cfg(any(dual_funding, splicing))]
 +      UnfundedOutboundV2(OutboundV2Channel<SP>),
 +      #[cfg(any(dual_funding, splicing))]
 +      UnfundedInboundV2(InboundV2Channel<SP>),
        Funded(Channel<SP>),
  }
  
@@@ -1205,10 -917,6 +1205,10 @@@ impl<'a, SP: Deref> ChannelPhase<SP> wh
                        ChannelPhase::Funded(chan) => &chan.context,
                        ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
                        ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
                }
        }
  
                        ChannelPhase::Funded(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
                        ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
                }
        }
  }
@@@ -1319,7 -1023,6 +1319,7 @@@ pub(super) struct ChannelContext<SP: De
        monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
        monitor_pending_finalized_fulfills: Vec<HTLCSource>,
 +      monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
  
        /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
        /// but our signer (initially) refused to give us a signature, we should retry at some point in
        // We track whether we already emitted a `ChannelReady` event.
        channel_ready_event_emitted: bool,
  
 +      /// Some if we initiated the shutdown of this channel.
 +      local_initiated_shutdown: Option<()>,
 +
        /// The unique identifier used to re-derive the private key material for the channel through
        /// [`SignerProvider::derive_channel_signer`].
 +      #[cfg(not(test))]
        channel_keys_id: [u8; 32],
 +      #[cfg(test)]
 +      pub channel_keys_id: [u8; 32],
  
        /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
        /// store it here and only release it to the `ChannelManager` once it asks for it.
  }
  
  impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
 -      /// Allowed in any state (including after shutdown)
 -      pub fn get_update_time_counter(&self) -> u32 {
 -              self.update_time_counter
 -      }
 +      fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
 +              fee_estimator: &'a LowerBoundedFeeEstimator<F>,
 +              entropy_source: &'a ES,
 +              signer_provider: &'a SP,
 +              counterparty_node_id: PublicKey,
 +              their_features: &'a InitFeatures,
 +              user_id: u128,
 +              config: &'a UserConfig,
 +              current_chain_height: u32,
 +              logger: &'a L,
 +              is_0conf: bool,
 +              our_funding_satoshis: u64,
 +              counterparty_pubkeys: ChannelPublicKeys,
 +              channel_type: ChannelTypeFeatures,
 +              holder_selected_channel_reserve_satoshis: u64,
 +              msg_channel_reserve_satoshis: u64,
 +              msg_push_msat: u64,
 +              open_channel_fields: msgs::CommonOpenChannelFields,
 +      ) -> Result<ChannelContext<SP>, ChannelError>
 +              where
 +                      ES::Target: EntropySource,
 +                      F::Target: FeeEstimator,
 +                      L::Target: Logger,
 +                      SP::Target: SignerProvider,
 +      {
 +              let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
 +              let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
  
 -      pub fn get_latest_monitor_update_id(&self) -> u64 {
 -              self.latest_monitor_update_id
 -      }
 +              let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
  
 -      pub fn should_announce(&self) -> bool {
 -              self.config.announced_channel
 -      }
 +              let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
 +              let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
 +              let pubkeys = holder_signer.pubkeys().clone();
  
 -      pub fn is_outbound(&self) -> bool {
 -              self.channel_transaction_parameters.is_outbound_from_holder
 -      }
 +              if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
 +                      return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
 +              }
  
 -      /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
 -      /// Allowed in any state (including after shutdown)
 -      pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
 -              self.config.options.forwarding_fee_base_msat
 -      }
 +              // Check sanity of message fields:
 +              if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
 +                      return Err(ChannelError::Close(format!(
 +                              "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
 +                              config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
 +                              open_channel_fields.funding_satoshis, our_funding_satoshis)));
 +              }
 +              if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
 +                      return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
 +              }
 +              if msg_channel_reserve_satoshis > channel_value_satoshis {
 +                      return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
 +              }
 +              let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
 +              if msg_push_msat > full_channel_value_msat {
 +                      return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
 +              }
 +              if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
 +                      return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
 +              }
 +              if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
 +                      return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
 +              }
 +              Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
  
 -      /// Returns true if we've ever received a message from the remote end for this Channel
 -      pub fn have_received_message(&self) -> bool {
 -              self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
 -      }
 +              let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
 +              if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
 +                      return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
 +              }
 +              if open_channel_fields.max_accepted_htlcs < 1 {
 +                      return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
 +              }
 +              if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
 +                      return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
 +              }
  
 -      /// Returns true if this channel is fully established and not known to be closing.
 -      /// Allowed in any state (including after shutdown)
 -      pub fn is_usable(&self) -> bool {
 -              matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
 -                      !self.channel_state.is_local_shutdown_sent() &&
 -                      !self.channel_state.is_remote_shutdown_sent() &&
 -                      !self.monitor_pending_channel_ready
 -      }
 +              // Now check against optional parameters as set by config...
 +              if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
 +                      return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
 +              }
 +              if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
 +                      return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
 +              }
 +              if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
 +                      return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
 +              }
 +              if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
 +                      return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
 +              }
 +              if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
 +                      return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
 +              }
 +              if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 +                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
 +              }
 +              if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
 +                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
 +              }
  
 -      /// shutdown state returns the state of the channel in its various stages of shutdown
 -      pub fn shutdown_state(&self) -> ChannelShutdownState {
 -              match self.channel_state {
 -                      ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
 -                              if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
 -                                      ChannelShutdownState::ShutdownInitiated
 -                              } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
 -                                      ChannelShutdownState::ResolvingHTLCs
 -                              } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
 -                                      ChannelShutdownState::NegotiatingClosingFee
 -                              } else {
 -                                      ChannelShutdownState::NotShuttingDown
 -                              },
 -                      ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
 -                      _ => ChannelShutdownState::NotShuttingDown,
 +              // Convert things into internal flags and prep our state:
 +
 +              if config.channel_handshake_limits.force_announced_channel_preference {
 +                      if config.channel_handshake_config.announced_channel != announced_channel {
 +                              return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
 +                      }
                }
 -      }
  
 -      fn closing_negotiation_ready(&self) -> bool {
 -              let is_ready_to_close = match self.channel_state {
 -                      ChannelState::AwaitingChannelReady(flags) =>
 -                              flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
 -                      ChannelState::ChannelReady(flags) =>
 -                              flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
 -                      _ => false,
 -              };
 -              self.pending_inbound_htlcs.is_empty() &&
 -                      self.pending_outbound_htlcs.is_empty() &&
 -                      self.pending_update_fee.is_none() &&
 -                      is_ready_to_close
 -      }
 +              if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 +                      // Protocol-level safety check; this should never happen because of
 +                      // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
 +                      return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
 +              }
 +              if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
 +                      return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
 +              }
 +              if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 +                      log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
 +                              msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
 +              }
 +              if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
 +                      return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
 +              }
  
 -      /// Returns true if this channel is currently available for use. This is a superset of
 -      /// is_usable() and considers things like the channel being temporarily disabled.
 -      /// Allowed in any state (including after shutdown)
 -      pub fn is_live(&self) -> bool {
 -              self.is_usable() && !self.channel_state.is_peer_disconnected()
 -      }
 +              // check if the funder's amount for the initial commitment tx is sufficient
 +              // for full fee payment plus a few HTLCs to ensure the channel will be useful.
 +              let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
 +                      ANCHOR_OUTPUT_VALUE_SATOSHI * 2
 +              } else {
 +                      0
 +              };
 +              let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
 +              let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
 +              if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
 +                      return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
 +              }
  
 -      // Public utilities:
 +              let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
 +              // While it's reasonable for us to not meet the channel reserve initially (if they don't
 +              // want to push much to us), our counterparty should always have more than our reserve.
 +              if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
 +                      return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
 +              }
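For a feel of the affordability check above, here is the arithmetic under stated assumptions: a non-anchor commitment transaction weighs roughly a fixed base plus a per-HTLC increment, and the funder must cover the fee for `MIN_AFFORDABLE_HTLC_COUNT` HTLCs. All constants below are illustrative stand-ins, not authoritative crate values:

```rust
const BASE_WEIGHT: u64 = 724; // assumed non-anchor commitment tx base weight
const WEIGHT_PER_HTLC: u64 = 172; // assumed weight added per non-dust HTLC
const MIN_AFFORDABLE_HTLC_COUNT: u64 = 4; // assumed minimum HTLC headroom

fn commit_tx_fee_sat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
    feerate_per_kw * (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) / 1000
}

fn funder_can_afford(
    funding_sat: u64, push_msat: u64, feerate_per_kw: u64, anchors_value_sat: u64,
) -> bool {
    let funders_amount_msat = funding_sat * 1000 - push_msat;
    let fee_sat = commit_tx_fee_sat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT);
    (funders_amount_msat / 1000).saturating_sub(anchors_value_sat) >= fee_sat
}

fn main() {
    // E.g. at 2500 sat/kW: fee = 2500 * (724 + 4 * 172) / 1000 = 3530 sats.
    assert_eq!(commit_tx_fee_sat(2500, 4), 3530);
    assert!(funder_can_afford(20_000, 0, 2500, 0));
}
```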
  
 -      pub fn channel_id(&self) -> ChannelId {
 -              self.channel_id
 -      }
 +              let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
 +                      match &open_channel_fields.shutdown_scriptpubkey {
 +                              &Some(ref script) => {
 +                                      // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
 +                                      if script.len() == 0 {
 +                                              None
 +                                      } else {
 +                                              if !script::is_bolt2_compliant(&script, their_features) {
 +                                                      return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
 +                                              }
 +                                              Some(script.clone())
 +                                      }
 +                              },
 +                              // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy; we fail the channel
 +                              &None => {
 +                                      return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
 +                              }
 +                      }
 +              } else { None };
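The shutdown-script negotiation above implements the BOLT 2 upfront_shutdown_script rules; distilled (with `Vec<u8>` standing in for the script type and the BOLT 2 compliance check elided):

```rust
fn counterparty_upfront_script(
    peer_supports_upfront_shutdown: bool,
    script: Option<Vec<u8>>,
) -> Result<Option<Vec<u8>>, &'static str> {
    if !peer_supports_upfront_shutdown {
        return Ok(None);
    }
    match script {
        // A 0-length script is the explicit opt-out.
        Some(s) if s.is_empty() => Ok(None),
        // Otherwise the script must be BOLT 2 compliant (check elided here).
        Some(s) => Ok(Some(s)),
        // Signaling the feature with no script at all is a protocol error.
        None => Err("upfront_shutdown signaled without a script; use a 0-length script to opt out"),
    }
}
```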
  
 -      // Return the `temporary_channel_id` used during channel establishment.
 -      //
 -      // Will return `None` for channels created prior to LDK version 0.0.115.
 -      pub fn temporary_channel_id(&self) -> Option<ChannelId> {
 -              self.temporary_channel_id
 -      }
 +              let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
 +                      match signer_provider.get_shutdown_scriptpubkey() {
 +                              Ok(scriptpubkey) => Some(scriptpubkey),
 +                              Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
 +                      }
 +              } else { None };
  
 -      pub fn minimum_depth(&self) -> Option<u32> {
 -              self.minimum_depth
 -      }
 +              if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
 +                      if !shutdown_scriptpubkey.is_compatible(&their_features) {
 +                              return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
 +                      }
 +              }
  
 -      /// Gets the "user_id" value passed into the construction of this channel. It has no special
 -      /// meaning and exists only to allow users to have a persistent identifier of a channel.
 -      pub fn get_user_id(&self) -> u128 {
 -              self.user_id
 -      }
 +              let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
 +                      Ok(script) => script,
 +                      Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
 +              };
  
 -      /// Gets the channel's type
 -      pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
 -              &self.channel_type
 -      }
 +              let mut secp_ctx = Secp256k1::new();
 +              secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 +
 +              let minimum_depth = if is_0conf {
 +                      Some(0)
 +              } else {
 +                      Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
 +              };
 +
 +              let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
 +
 +              // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
 +
 +              let channel_context = ChannelContext {
 +                      user_id,
 +
 +                      config: LegacyChannelConfig {
 +                              options: config.channel_config.clone(),
 +                              announced_channel,
 +                              commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
 +                      },
 +
 +                      prev_config: None,
 +
 +                      inbound_handshake_limits_override: None,
 +
 +                      temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
 +                      channel_id: open_channel_fields.temporary_channel_id,
 +                      channel_state: ChannelState::NegotiatingFunding(
 +                              NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
 +                      ),
 +                      announcement_sigs_state: AnnouncementSigsState::NotSent,
 +                      secp_ctx,
 +
 +                      latest_monitor_update_id: 0,
 +
 +                      holder_signer: ChannelSignerType::Ecdsa(holder_signer),
 +                      shutdown_scriptpubkey,
 +                      destination_script,
 +
 +                      cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 +                      cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 +                      value_to_self_msat,
 +
 +                      pending_inbound_htlcs: Vec::new(),
 +                      pending_outbound_htlcs: Vec::new(),
 +                      holding_cell_htlc_updates: Vec::new(),
 +                      pending_update_fee: None,
 +                      holding_cell_update_fee: None,
 +                      next_holder_htlc_id: 0,
 +                      next_counterparty_htlc_id: 0,
 +                      update_time_counter: 1,
 +
 +                      resend_order: RAACommitmentOrder::CommitmentFirst,
 +
 +                      monitor_pending_channel_ready: false,
 +                      monitor_pending_revoke_and_ack: false,
 +                      monitor_pending_commitment_signed: false,
 +                      monitor_pending_forwards: Vec::new(),
 +                      monitor_pending_failures: Vec::new(),
 +                      monitor_pending_finalized_fulfills: Vec::new(),
 +                      monitor_pending_update_adds: Vec::new(),
 +
 +                      signer_pending_commitment_update: false,
 +                      signer_pending_funding: false,
 +
 +                      #[cfg(debug_assertions)]
 +                      holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
 +                      #[cfg(debug_assertions)]
 +                      counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
 +
 +                      last_sent_closing_fee: None,
 +                      pending_counterparty_closing_signed: None,
 +                      expecting_peer_commitment_signed: false,
 +                      closing_fee_limits: None,
 +                      target_closing_feerate_sats_per_kw: None,
 +
 +                      funding_tx_confirmed_in: None,
 +                      funding_tx_confirmation_height: 0,
 +                      short_channel_id: None,
 +                      channel_creation_height: current_chain_height,
 +
 +                      feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
 +                      channel_value_satoshis,
 +                      counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
 +                      holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
 +                      counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
 +                      holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
 +                      counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
 +                      holder_selected_channel_reserve_satoshis,
 +                      counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
 +                      holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 +                      counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
 +                      holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 +                      minimum_depth,
 +
 +                      counterparty_forwarding_info: None,
 +
 +                      channel_transaction_parameters: ChannelTransactionParameters {
 +                              holder_pubkeys: pubkeys,
 +                              holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
 +                              is_outbound_from_holder: false,
 +                              counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
 +                                      selected_contest_delay: open_channel_fields.to_self_delay,
 +                                      pubkeys: counterparty_pubkeys,
 +                              }),
 +                              funding_outpoint: None,
 +                              channel_type_features: channel_type.clone()
 +                      },
 +                      funding_transaction: None,
 +                      is_batch_funding: None,
 +
 +                      counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
 +                      counterparty_prev_commitment_point: None,
 +                      counterparty_node_id,
 +
 +                      counterparty_shutdown_scriptpubkey,
 +
 +                      commitment_secrets: CounterpartyCommitmentSecrets::new(),
 +
 +                      channel_update_status: ChannelUpdateStatus::Enabled,
 +                      closing_signed_in_flight: false,
 +
 +                      announcement_sigs: None,
 +
 +                      #[cfg(any(test, fuzzing))]
 +                      next_local_commitment_tx_fee_info_cached: Mutex::new(None),
 +                      #[cfg(any(test, fuzzing))]
 +                      next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +
 +                      workaround_lnd_bug_4006: None,
 +                      sent_message_awaiting_response: None,
 +
 +                      latest_inbound_scid_alias: None,
 +                      outbound_scid_alias: 0,
 +
 +                      channel_pending_event_emitted: false,
 +                      channel_ready_event_emitted: false,
 +
 +                      #[cfg(any(test, fuzzing))]
 +                      historical_inbound_htlc_fulfills: new_hash_set(),
 +
 +                      channel_type,
 +                      channel_keys_id,
 +
 +                      local_initiated_shutdown: None,
 +
 +                      blocked_monitor_updates: Vec::new(),
 +              };
 +
 +              Ok(channel_context)
 +      }
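 
For readers following the upfront-shutdown handling at the top of this constructor, here is a minimal self-contained sketch of the BOLT 2 acceptance rule, with `is_compliant` standing in for `script::is_bolt2_compliant` and plain byte vectors standing in for the real script types (illustrative only, not part of this diff):

// Illustrative sketch only. `is_compliant` stands in for
// script::is_bolt2_compliant; Vec<u8> stands in for the script type.
fn accept_upfront_shutdown_script(
	peer_signals_upfront: bool,
	script: Option<Vec<u8>>,
	is_compliant: impl Fn(&[u8]) -> bool,
) -> Result<Option<Vec<u8>>, String> {
	if !peer_signals_upfront {
		return Ok(None);
	}
	match script {
		// A zero-length script is the spec's opt-out mechanism.
		Some(s) if s.is_empty() => Ok(None),
		Some(s) if is_compliant(&s) => Ok(Some(s)),
		Some(_) => Err("unacceptable upfront scriptpubkey format".to_owned()),
		// Signaling upfront shutdown without any script is a protocol violation.
		None => Err("upfront_shutdown signaled but no script provided".to_owned()),
	}
}

A zero-length script is the only valid way to opt out once upfront shutdown has been signaled; any other non-compliant script closes the channel, as the error paths above mirror.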
 +
 +      fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
 +              fee_estimator: &'a LowerBoundedFeeEstimator<F>,
 +              entropy_source: &'a ES,
 +              signer_provider: &'a SP,
 +              counterparty_node_id: PublicKey,
 +              their_features: &'a InitFeatures,
 +              funding_satoshis: u64,
 +              push_msat: u64,
 +              user_id: u128,
 +              config: &'a UserConfig,
 +              current_chain_height: u32,
 +              outbound_scid_alias: u64,
 +              temporary_channel_id: Option<ChannelId>,
 +              holder_selected_channel_reserve_satoshis: u64,
 +              channel_keys_id: [u8; 32],
 +              holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
 +              pubkeys: ChannelPublicKeys,
 +      ) -> Result<ChannelContext<SP>, APIError>
 +              where
 +                      ES::Target: EntropySource,
 +                      F::Target: FeeEstimator,
 +                      SP::Target: SignerProvider,
 +      {
 +              // This will be updated with the counterparty contribution if this is a dual-funded channel
 +              let channel_value_satoshis = funding_satoshis;
 +
 +              let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
 +
 +              if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
 +                      return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
 +              }
 +              if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
 +                      return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
 +              }
 +              let channel_value_msat = channel_value_satoshis * 1000;
 +              if push_msat > channel_value_msat {
 +                      return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
 +              }
 +              if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
 +                      return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
 +              }
 +
 +              let channel_type = get_initial_channel_type(&config, their_features);
 +              debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
 +
 +              let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
 +                      (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
 +              } else {
 +                      (ConfirmationTarget::NonAnchorChannelFee, 0)
 +              };
 +              let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
 +
 +              let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
 +              let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
 +              if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
 +                      return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
 +              }
 +
 +              let mut secp_ctx = Secp256k1::new();
 +              secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 +
 +              let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
 +                      match signer_provider.get_shutdown_scriptpubkey() {
 +                              Ok(scriptpubkey) => Some(scriptpubkey),
 +                              Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
 +                      }
 +              } else { None };
 +
 +              if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
 +                      if !shutdown_scriptpubkey.is_compatible(&their_features) {
 +                              return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
 +                      }
 +              }
 +
 +              let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
 +                      Ok(script) => script,
 +                      Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
 +              };
 +
 +              let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
 +
 +              Ok(Self {
 +                      user_id,
 +
 +                      config: LegacyChannelConfig {
 +                              options: config.channel_config.clone(),
 +                              announced_channel: config.channel_handshake_config.announced_channel,
 +                              commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
 +                      },
 +
 +                      prev_config: None,
 +
 +                      inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
 +
 +                      channel_id: temporary_channel_id,
 +                      temporary_channel_id: Some(temporary_channel_id),
 +                      channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
 +                      announcement_sigs_state: AnnouncementSigsState::NotSent,
 +                      secp_ctx,
 +                      // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
 +                      channel_value_satoshis,
 +
 +                      latest_monitor_update_id: 0,
 +
 +                      holder_signer: ChannelSignerType::Ecdsa(holder_signer),
 +                      shutdown_scriptpubkey,
 +                      destination_script,
 +
 +                      cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 +                      cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 +                      value_to_self_msat,
 +
 +                      pending_inbound_htlcs: Vec::new(),
 +                      pending_outbound_htlcs: Vec::new(),
 +                      holding_cell_htlc_updates: Vec::new(),
 +                      pending_update_fee: None,
 +                      holding_cell_update_fee: None,
 +                      next_holder_htlc_id: 0,
 +                      next_counterparty_htlc_id: 0,
 +                      update_time_counter: 1,
 +
 +                      resend_order: RAACommitmentOrder::CommitmentFirst,
 +
 +                      monitor_pending_channel_ready: false,
 +                      monitor_pending_revoke_and_ack: false,
 +                      monitor_pending_commitment_signed: false,
 +                      monitor_pending_forwards: Vec::new(),
 +                      monitor_pending_failures: Vec::new(),
 +                      monitor_pending_finalized_fulfills: Vec::new(),
 +                      monitor_pending_update_adds: Vec::new(),
 +
 +                      signer_pending_commitment_update: false,
 +                      signer_pending_funding: false,
 +
 +                      // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
 +                      // when we receive `accept_channel2`.
 +                      #[cfg(debug_assertions)]
 +                      holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 +                      #[cfg(debug_assertions)]
 +                      counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 +
 +                      last_sent_closing_fee: None,
 +                      pending_counterparty_closing_signed: None,
 +                      expecting_peer_commitment_signed: false,
 +                      closing_fee_limits: None,
 +                      target_closing_feerate_sats_per_kw: None,
 +
 +                      funding_tx_confirmed_in: None,
 +                      funding_tx_confirmation_height: 0,
 +                      short_channel_id: None,
 +                      channel_creation_height: current_chain_height,
 +
 +                      feerate_per_kw: commitment_feerate,
 +                      counterparty_dust_limit_satoshis: 0,
 +                      holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
 +                      counterparty_max_htlc_value_in_flight_msat: 0,
 +                      // We'll adjust this to include our counterparty's `funding_satoshis` when we
 +                      // receive `accept_channel2`.
 +                      holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
 +                      counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
 +                      holder_selected_channel_reserve_satoshis,
 +                      counterparty_htlc_minimum_msat: 0,
 +                      holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 +                      counterparty_max_accepted_htlcs: 0,
 +                      holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 +                      minimum_depth: None, // Filled in in accept_channel
 +
 +                      counterparty_forwarding_info: None,
 +
 +                      channel_transaction_parameters: ChannelTransactionParameters {
 +                              holder_pubkeys: pubkeys,
 +                              holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
 +                              is_outbound_from_holder: true,
 +                              counterparty_parameters: None,
 +                              funding_outpoint: None,
 +                              channel_type_features: channel_type.clone()
 +                      },
 +                      funding_transaction: None,
 +                      is_batch_funding: None,
 +
 +                      counterparty_cur_commitment_point: None,
 +                      counterparty_prev_commitment_point: None,
 +                      counterparty_node_id,
 +
 +                      counterparty_shutdown_scriptpubkey: None,
 +
 +                      commitment_secrets: CounterpartyCommitmentSecrets::new(),
 +
 +                      channel_update_status: ChannelUpdateStatus::Enabled,
 +                      closing_signed_in_flight: false,
 +
 +                      announcement_sigs: None,
 +
 +                      #[cfg(any(test, fuzzing))]
 +                      next_local_commitment_tx_fee_info_cached: Mutex::new(None),
 +                      #[cfg(any(test, fuzzing))]
 +                      next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +
 +                      workaround_lnd_bug_4006: None,
 +                      sent_message_awaiting_response: None,
 +
 +                      latest_inbound_scid_alias: None,
 +                      outbound_scid_alias,
 +
 +                      channel_pending_event_emitted: false,
 +                      channel_ready_event_emitted: false,
 +
 +                      #[cfg(any(test, fuzzing))]
 +                      historical_inbound_htlc_fulfills: new_hash_set(),
 +
 +                      channel_type,
 +                      channel_keys_id,
 +
 +                      blocked_monitor_updates: Vec::new(),
 +                      local_initiated_shutdown: None,
 +              })
 +      }
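 
Condensing the parameter checks at the top of `new_for_outbound_channel`, a self-contained sketch might read as follows; the constant values are inlined here as assumptions (the real constants are defined elsewhere in this file):

// Self-contained sketch of the outbound parameter checks above; the
// constant values are assumptions inlined for illustration.
const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

fn check_outbound_params(
	funding_satoshis: u64, push_msat: u64, peer_supports_wumbo: bool,
) -> Result<(), String> {
	if !peer_supports_wumbo && funding_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
		return Err(format!("funding_value must not exceed {}", MAX_FUNDING_SATOSHIS_NO_WUMBO));
	}
	if funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
		return Err("funding_value must be smaller than the total bitcoin supply".to_owned());
	}
	if push_msat > funding_satoshis * 1000 {
		return Err("push value exceeds channel value".to_owned());
	}
	Ok(())
}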
 +
 +      /// Allowed in any state (including after shutdown)
 +      pub fn get_update_time_counter(&self) -> u32 {
 +              self.update_time_counter
 +      }
 +
 +      pub fn get_latest_monitor_update_id(&self) -> u64 {
 +              self.latest_monitor_update_id
 +      }
 +
 +      pub fn should_announce(&self) -> bool {
 +              self.config.announced_channel
 +      }
 +
 +      pub fn is_outbound(&self) -> bool {
 +              self.channel_transaction_parameters.is_outbound_from_holder
 +      }
 +
 +      /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
 +      /// Allowed in any state (including after shutdown)
 +      pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
 +              self.config.options.forwarding_fee_base_msat
 +      }
 +
 +      /// Returns true if we've ever received a message from the remote end for this Channel
 +      pub fn have_received_message(&self) -> bool {
 +              self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
 +      }
 +
 +      /// Returns true if this channel is fully established and not known to be closing.
 +      /// Allowed in any state (including after shutdown)
 +      pub fn is_usable(&self) -> bool {
 +              matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
 +                      !self.channel_state.is_local_shutdown_sent() &&
 +                      !self.channel_state.is_remote_shutdown_sent() &&
 +                      !self.monitor_pending_channel_ready
 +      }
 +
 +      /// Returns the state of the channel as it progresses through its various stages of shutdown.
 +      pub fn shutdown_state(&self) -> ChannelShutdownState {
 +              match self.channel_state {
 +                      ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
 +                              if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
 +                                      ChannelShutdownState::ShutdownInitiated
 +                              } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
 +                                      ChannelShutdownState::ResolvingHTLCs
 +                              } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
 +                                      ChannelShutdownState::NegotiatingClosingFee
 +                              } else {
 +                                      ChannelShutdownState::NotShuttingDown
 +                              },
 +                      ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
 +                      _ => ChannelShutdownState::NotShuttingDown,
 +              }
 +      }
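 
A hypothetical caller-side sketch of how the `ChannelShutdownState` returned above maps onto the shutdown progression (variant names as used in this diff; the labels are illustrative):

// Hypothetical caller-side mapping of ChannelShutdownState; labels are
// illustrative only, not a real LDK API.
fn describe_shutdown_state(state: ChannelShutdownState) -> &'static str {
	match state {
		ChannelShutdownState::NotShuttingDown => "channel open",
		ChannelShutdownState::ShutdownInitiated => "we sent shutdown, awaiting theirs",
		ChannelShutdownState::ResolvingHTLCs => "shutdown exchanged, draining pending HTLCs",
		ChannelShutdownState::NegotiatingClosingFee => "exchanging closing_signed",
		ChannelShutdownState::ShutdownComplete => "channel closed",
	}
}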
 +
 +      fn closing_negotiation_ready(&self) -> bool {
 +              let is_ready_to_close = match self.channel_state {
 +                      ChannelState::AwaitingChannelReady(flags) =>
 +                              flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
 +                      ChannelState::ChannelReady(flags) =>
 +                              flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
 +                      _ => false,
 +              };
 +              self.pending_inbound_htlcs.is_empty() &&
 +                      self.pending_outbound_htlcs.is_empty() &&
 +                      self.pending_update_fee.is_none() &&
 +                      is_ready_to_close
 +      }
 +
 +      /// Returns true if this channel is currently available for use. This is a stricter check
 +      /// than is_usable(): it additionally requires that the peer not be disconnected.
 +      /// Allowed in any state (including after shutdown)
 +      pub fn is_live(&self) -> bool {
 +              self.is_usable() && !self.channel_state.is_peer_disconnected()
 +      }
 +
 +      // Public utilities:
 +
 +      pub fn channel_id(&self) -> ChannelId {
 +              self.channel_id
 +      }
 +
 +      // Return the `temporary_channel_id` used during channel establishment.
 +      //
 +      // Will return `None` for channels created prior to LDK version 0.0.115.
 +      pub fn temporary_channel_id(&self) -> Option<ChannelId> {
 +              self.temporary_channel_id
 +      }
 +
 +      pub fn minimum_depth(&self) -> Option<u32> {
 +              self.minimum_depth
 +      }
 +
 +      /// Gets the "user_id" value passed into the construction of this channel. It has no special
 +      /// meaning and exists only to allow users to have a persistent identifier of a channel.
 +      pub fn get_user_id(&self) -> u128 {
 +              self.user_id
 +      }
 +
 +      /// Gets the channel's type
 +      pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
 +              &self.channel_type
 +      }
  
        /// Gets the channel's `short_channel_id`.
        ///
                        feerate_per_kw = cmp::max(feerate_per_kw, feerate);
                }
                let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
 -              cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
 +              cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
        }
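 
The change above raises the closing-feerate cap from a flat 2530 sat/kW to the current feerate plus 2530 sat/kW. A small worked sketch of the resulting bound (only the final max is shown; the surrounding feerate selection is elided):

// Sketch of the cap computed above: the larger of (feerate + 2530) and
// 1.25x the feerate, so the additive term dominates at low feerates.
fn closing_feerate_cap(feerate_per_kw: u32) -> u32 {
	let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
	core::cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::MAX))
}
// closing_feerate_cap(1_000)  == 3_530  (additive floor dominates)
// closing_feerate_cap(20_000) == 25_000 (the 1.25x term dominates)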
  
        /// Get forwarding information for the counterparty.
                stats
        }
  
 +      /// Returns information on all pending inbound HTLCs.
 +      pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
 +              let mut holding_cell_states = new_hash_map();
 +              for holding_cell_update in self.holding_cell_htlc_updates.iter() {
 +                      match holding_cell_update {
 +                              HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 +                                      holding_cell_states.insert(
 +                                              htlc_id,
 +                                              InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
 +                                      );
 +                              },
 +                              HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
 +                                      holding_cell_states.insert(
 +                                              htlc_id,
 +                                              InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
 +                                      );
 +                              },
 +                              HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
 +                                      holding_cell_states.insert(
 +                                              htlc_id,
 +                                              InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
 +                                      );
 +                              },
 +                              // Outbound HTLC.
 +                              HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
 +                      }
 +              }
 +              let mut inbound_details = Vec::new();
 +              let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                      0
 +              } else {
 +                      let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
 +                      dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
 +              };
 +              let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
 +              for htlc in self.pending_inbound_htlcs.iter() {
 +                      if let Some(state_details) = (&htlc.state).into() {
 +                              inbound_details.push(InboundHTLCDetails{
 +                                      htlc_id: htlc.htlc_id,
 +                                      amount_msat: htlc.amount_msat,
 +                                      cltv_expiry: htlc.cltv_expiry,
 +                                      payment_hash: htlc.payment_hash,
 +                                      state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
 +                                      is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
 +                              });
 +                      }
 +              }
 +              inbound_details
 +      }
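 
The dust classification above can be made concrete with a small numeric sketch for a non-anchor channel; the weight and dust-limit values below are assumptions matching the usual constants (HTLC-success weight 703, minimum channel dust limit 354 sats):

// An inbound HTLC is dust on the holder's commitment tx if it can't pay
// for its own HTLC-success transaction at the buffered feerate. All
// parameter values here are assumed for illustration.
fn is_inbound_htlc_dust(
	amount_msat: u64, dust_buffer_feerate: u64, htlc_success_tx_weight: u64,
	holder_dust_limit_sat: u64,
) -> bool {
	let success_dust_limit_sat = dust_buffer_feerate * htlc_success_tx_weight / 1000;
	amount_msat / 1000 < success_dust_limit_sat + holder_dust_limit_sat
}
// e.g. is_inbound_htlc_dust(400_000, 2_500, 703, 354) == true  (400 sat < 2_111 sat)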
 +
 +      /// Returns information on all pending outbound HTLCs.
 +      pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
 +              let mut outbound_details = Vec::new();
 +              let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                      0
 +              } else {
 +                      let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
 +                      dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
 +              };
 +              let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
 +              for htlc in self.pending_outbound_htlcs.iter() {
 +                      outbound_details.push(OutboundHTLCDetails{
 +                              htlc_id: Some(htlc.htlc_id),
 +                              amount_msat: htlc.amount_msat,
 +                              cltv_expiry: htlc.cltv_expiry,
 +                              payment_hash: htlc.payment_hash,
 +                              skimmed_fee_msat: htlc.skimmed_fee_msat,
 +                              state: Some((&htlc.state).into()),
 +                              is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
 +                      });
 +              }
 +              for holding_cell_update in self.holding_cell_htlc_updates.iter() {
 +                      if let HTLCUpdateAwaitingACK::AddHTLC {
 +                              amount_msat,
 +                              cltv_expiry,
 +                              payment_hash,
 +                              skimmed_fee_msat,
 +                              ..
 +                      } = *holding_cell_update {
 +                              outbound_details.push(OutboundHTLCDetails{
 +                                      htlc_id: None,
 +                                      amount_msat,
 +                                      cltv_expiry,
 +                                      payment_hash,
 +                                      skimmed_fee_msat,
 +                                      state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
 +                                      is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
 +                              });
 +                      }
 +              }
 +              outbound_details
 +      }
 +
        /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
        /// Doesn't bother handling the
        /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
 -                      let generate_monitor_update = match self.channel_state {
 -                              ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
 -                              _ => false,
 -                      };
 -                      if generate_monitor_update {
 +                      if !self.channel_state.is_pre_funded_state() {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
 -                              Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
 +                              Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
                                        counterparty_node_id: Some(self.counterparty_node_id),
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
 +                                      channel_id: Some(self.channel_id()),
                                }))
                        } else { None }
                } else { None };
                        _ => todo!()
                }
        }
 -}
 -
 -// Internal utility functions for channels
  
 -/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
 -/// `channel_value_satoshis` in msat, set through
 -/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
 -///
 +      /// If we receive an error message when attempting to open a channel, it may only be a rejection
 +      /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
 +      /// downgrade of channel features would be possible so that we can still open the channel.
 +      pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
 +              &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
 +      ) -> Result<(), ()>
 +      where
 +              F::Target: FeeEstimator
 +      {
 +              if !self.is_outbound() ||
 +                      !matches!(
 +                              self.channel_state, ChannelState::NegotiatingFunding(flags)
 +                              if flags == NegotiatingFundingFlags::OUR_INIT_SENT
 +                      )
 +              {
 +                      return Err(());
 +              }
 +              if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
 +                      // We've exhausted our options
 +                      return Err(());
 +              }
 +              // We support opening a few different types of channels. Try removing our additional
 +              // features one by one until we've either arrived at our default or the counterparty has
 +              // accepted one.
 +              //
 +              // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
 +              // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
 +              // checks whether the counterparty supports every feature, this would only happen if the
 +              // counterparty is advertising the feature, but rejecting channels proposing the feature for
 +              // whatever reason.
 +              if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
 +                      self.channel_type.clear_anchors_zero_fee_htlc_tx();
 +                      self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
 +                      assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
 +              } else if self.channel_type.supports_scid_privacy() {
 +                      self.channel_type.clear_scid_privacy();
 +              } else {
 +                      self.channel_type = ChannelTypeFeatures::only_static_remote_key();
 +              }
 +              self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
 +              Ok(())
 +      }
 +}
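 
Flattening the branch in `maybe_downgrade_channel_features` into a linear ladder (a simplification: `option_anchors_zero_fee_htlc_tx` and `option_scid_privacy` are independent feature bits, but the retry order is fixed), the downgrade sequence per rejected open attempt looks like:

// Simplified sketch of the downgrade order above: one step per rejected
// open_channel attempt, Err(()) once only static_remote_key remains.
#[derive(Clone, Copy, PartialEq, Debug)]
enum NegotiatedType { AnchorsZeroFeeHtlcTx, ScidPrivacy, StaticRemoteKeyOnly }

fn downgrade_once(t: NegotiatedType) -> Result<NegotiatedType, ()> {
	match t {
		NegotiatedType::AnchorsZeroFeeHtlcTx => Ok(NegotiatedType::ScidPrivacy),
		NegotiatedType::ScidPrivacy => Ok(NegotiatedType::StaticRemoteKeyOnly),
		NegotiatedType::StaticRemoteKeyOnly => Err(()),
	}
}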
 +
 +// Internal utility functions for channels
 +
 +/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
 +/// `channel_value_satoshis` in msat, set through
 +/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
 +///
  /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
  ///
  /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
@@@ -3494,20 -2507,6 +3494,20 @@@ pub(crate) fn get_legacy_default_holder
        cmp::min(channel_value_satoshis, cmp::max(q, 1000))
  }
  
 +/// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
 +/// default of 1% of the total channel value.
 +///
 +/// Guaranteed to return a value no larger than channel_value_satoshis
 +///
 +/// This is used for both outbound and inbound channels and has a lower bound
 +/// of `dust_limit_satoshis`.
 +#[cfg(any(dual_funding, splicing))]
 +fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
 +      // Fixed at 1% of channel value by spec.
 +      let (q, _) = channel_value_satoshis.overflowing_div(100);
 +      cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
 +}
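 
A few worked values for the reserve above, assuming a dust limit of 354 sats (the usual minimum channel dust limit):

// Illustrative test of get_v2_channel_reserve_satoshis; 354 is assumed as
// the dust limit here.
#[test]
fn v2_reserve_examples() {
	assert_eq!(get_v2_channel_reserve_satoshis(1_000_000, 354), 10_000); // 1% dominates
	assert_eq!(get_v2_channel_reserve_satoshis(20_000, 354), 354);       // dust-limit floor
	assert_eq!(get_v2_channel_reserve_satoshis(100, 354), 100);          // capped at channel value
}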
 +
  // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
  // Note that num_htlcs should not include dust HTLCs.
  #[inline]
@@@ -3523,26 -2522,10 +3523,26 @@@ pub(crate) fn commit_tx_fee_msat(feerat
        (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
  }
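 
A worked instance of the fee formula above, assuming the usual non-anchor constants (commitment tx base weight 724, 172 weight units per HTLC):

// Recomputing commit_tx_fee_msat by hand for a non-anchor channel; the
// constants are assumptions inlined for illustration.
fn example_commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64) -> u64 {
	(724 + num_htlcs * 172) * feerate_per_kw as u64 / 1000 * 1000
}
// example_commit_tx_fee_msat(2_500, 1) == 2_240_000 msat, i.e. 2_240 sats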
  
 +/// Context for dual-funded channels.
 +#[cfg(any(dual_funding, splicing))]
 +pub(super) struct DualFundingChannelContext {
 +      /// The amount in satoshis we will be contributing to the channel.
 +      pub our_funding_satoshis: u64,
 +      /// The amount in satoshis our counterparty will be contributing to the channel.
 +      pub their_funding_satoshis: u64,
 +      /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
 +      /// to the current block height to align incentives against fee-sniping.
 +      pub funding_tx_locktime: u32,
 +      /// The feerate set by the initiator to be used for the funding transaction.
 +      pub funding_feerate_sat_per_1000_weight: u32,
 +}
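 
A hypothetical construction of the context above, e.g. for an evenly funded channel (all values illustrative):

// Hypothetical example values; `current_height` would come from the chain tip.
let current_height: u32 = 840_000;
let dual_funding_context = DualFundingChannelContext {
	our_funding_satoshis: 500_000,
	their_funding_satoshis: 500_000,
	// Set to the current height to discourage fee-sniping, per the field docs.
	funding_tx_locktime: current_height,
	funding_feerate_sat_per_1000_weight: 1_000,
};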
 +
  // Holder designates channel data owned for the benefit of the user client.
  // Counterparty designates channel data owned by the other channel participant entity.
  pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
        pub context: ChannelContext<SP>,
 +      #[cfg(any(dual_funding, splicing))]
 +      pub dual_funding_channel_context: Option<DualFundingChannelContext>,
  }
  
  #[cfg(any(test, fuzzing))]
@@@ -3726,7 -2709,7 +3726,7 @@@ impl<SP: Deref> Channel<SP> wher
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
 -              assert!(self.context.channel_state.should_force_holding_cell());
 +              assert!(!self.context.channel_state.can_generate_new_commitment());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage: payment_preimage_arg.clone(),
                        }],
 +                      channel_id: Some(self.context.channel_id()),
                };
  
 -              if self.context.channel_state.should_force_holding_cell() {
 +              if !self.context.channel_state.can_generate_new_commitment() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
                        return Ok(None);
                }
  
 -              if self.context.channel_state.should_force_holding_cell() {
 +              if !self.context.channel_state.can_generate_new_commitment() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
                let mut check_reconnection = false;
                match &self.context.channel_state {
                        ChannelState::AwaitingChannelReady(flags) => {
 -                              let flags = *flags & !FundedStateFlags::ALL;
 +                              let flags = flags.clone().clear(FundedStateFlags::ALL.into());
                                debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
 -                              if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
 +                              if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
                                        // If we reconnected before sending our `channel_ready` they may still resend theirs.
                                        check_reconnection = true;
 -                              } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
 +                              } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
                                        self.context.channel_state.set_their_channel_ready();
                                } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
                                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
  
                log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
  
 -              Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
 +              Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
        }
  
 -      pub fn update_add_htlc<F, FE: Deref, L: Deref>(
 -              &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
 -              create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
 -      ) -> Result<(), ChannelError>
 -      where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
 -              FE::Target: FeeEstimator, L::Target: Logger,
 -      {
 +      pub fn update_add_htlc(
 +              &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
 +      ) -> Result<(), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
 -              // We can't accept HTLCs sent after we've sent a shutdown.
 -              if self.context.channel_state.is_local_shutdown_sent() {
 -                      pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
 -              }
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
                if self.context.channel_state.is_remote_shutdown_sent() {
                        return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
  
                let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
 -              let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
                if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
                        return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
                }
                        }
                }
  
 -              let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
 -              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 -                      (0, 0)
 -              } else {
 -                      let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
 -                      (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
 -                              dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
 -              };
 -              let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
 -              if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
 -                      let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
 -                      if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 -                              log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
 -                                      on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 -                              pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
 -                      }
 -              }
 -
 -              let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
 -              if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
 -                      let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
 -                      if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 -                              log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
 -                                      on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 -                              pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
 -                      }
 -              }
 -
                let pending_value_to_self_msat =
                        self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
                let pending_remote_value_msat =
                } else {
                        0
                };
 -              if !self.context.is_outbound() {
 -                      // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
 -                      // the spec because the fee spike buffer requirement doesn't exist on the receiver's
 -                      // side, only on the sender's. Note that with anchor outputs we are no longer as
 -                      // sensitive to fee spikes, so we need to account for them.
 -                      let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
 -                      let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
 -                      if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 -                              remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 -                      }
 -                      if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
 -                              // Note that if the pending_forward_status is not updated here, then it's because we're already failing
 -                              // the HTLC, i.e. its status is already set to failing.
 -                              log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
 -                              pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
 -                      }
 -              } else {
 +              if self.context.is_outbound() {
                        // Check that they won't violate our local required channel reserve by adding this HTLC.
                        let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
                        let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
                        amount_msat: msg.amount_msat,
                        payment_hash: msg.payment_hash,
                        cltv_expiry: msg.cltv_expiry,
 -                      state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
 +                      state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
 +                              pending_htlc_status: pending_forward_status
 +                      }),
                });
                Ok(())
        }
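 
Since this function now always wraps the status in `InboundHTLCResolution::Resolved`, a hypothetical processing loop that also handles the deferred `Pending` variant might branch as follows (`handle_status` and the queue are stand-ins, not real LDK APIs):

// Hypothetical sketch: branching on the resolution variants used in this
// diff. handle_status and monitor_pending_update_adds are stand-ins.
match resolution {
	InboundHTLCResolution::Resolved { pending_htlc_status } => {
		// The fate of the HTLC was decided when it was received.
		handle_status(pending_htlc_status);
	},
	InboundHTLCResolution::Pending { update_add_htlc } => {
		// Defer: hold the raw message until both sides have revoke_and_ack'd.
		monitor_pending_update_adds.push(update_add_htlc);
	},
}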
                Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
  
 -      pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
 +      pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
                        return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
  
 -              self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
 +              self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
        }
  
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                }
  
                for htlc in self.context.pending_inbound_htlcs.iter_mut() {
 -                      let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
 -                              Some(forward_info.clone())
 +                      let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
 +                              Some(resolution.clone())
                        } else { None };
 -                      if let Some(forward_info) = new_forward {
 +                      if let Some(htlc_resolution) = htlc_resolution {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
                                        &htlc.payment_hash, &self.context.channel_id);
 -                              htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
 +                              htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
                                need_commitment = true;
                        }
                }
                                htlc_outputs: htlcs_and_sigs,
                                claimed_htlcs,
                                nondust_htlc_sources,
 -                      }]
 +                      }],
 +                      channel_id: Some(self.context.channel_id()),
                };
  
                self.context.cur_holder_commitment_transaction_number -= 1;
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
 -              if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
 +              if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
                                update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
                                counterparty_node_id: Some(self.context.counterparty_node_id),
                                updates: Vec::new(),
 +                              channel_id: Some(self.context.channel_id()),
                        };
  
                        let mut htlc_updates = Vec::new();
                                idx: self.context.cur_counterparty_commitment_transaction_number + 1,
                                secret: msg.per_commitment_secret,
                        }],
 +                      channel_id: Some(self.context.channel_id()),
                };
  
                // Update state now that we've passed all the can-fail calls...
  
                log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
                let mut to_forward_infos = Vec::new();
 +              let mut pending_update_adds = Vec::new();
                let mut revoked_htlcs = Vec::new();
                let mut finalized_claimed_htlcs = Vec::new();
                let mut update_fail_htlcs = Vec::new();
                                        let mut state = InboundHTLCState::Committed;
                                        mem::swap(&mut state, &mut htlc.state);
  
 -                                      if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
 +                                      if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
                                                log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
 -                                              htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
 +                                              htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
                                                require_commitment = true;
 -                                      } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
 -                                              match forward_info {
 -                                                      PendingHTLCStatus::Fail(fail_msg) => {
 -                                                              log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
 -                                                              require_commitment = true;
 -                                                              match fail_msg {
 -                                                                      HTLCFailureMsg::Relay(msg) => {
 -                                                                              htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
 -                                                                              update_fail_htlcs.push(msg)
 -                                                                      },
 -                                                                      HTLCFailureMsg::Malformed(msg) => {
 -                                                                              htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
 -                                                                              update_fail_malformed_htlcs.push(msg)
 +                                      } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
 +                                              match resolution {
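 +                                                      // `Resolved` HTLCs already carry a forward/fail decision; `Pending` ones
 +                                                      // are deferred for the `ChannelManager` to resolve once this state update
 +                                                      // completes.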
 +                                                      InboundHTLCResolution::Resolved { pending_htlc_status } =>
 +                                                              match pending_htlc_status {
 +                                                                      PendingHTLCStatus::Fail(fail_msg) => {
 +                                                                              log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
 +                                                                              require_commitment = true;
 +                                                                              match fail_msg {
 +                                                                                      HTLCFailureMsg::Relay(msg) => {
 +                                                                                              htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
 +                                                                                              update_fail_htlcs.push(msg)
 +                                                                                      },
 +                                                                                      HTLCFailureMsg::Malformed(msg) => {
 +                                                                                              htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
 +                                                                                              update_fail_malformed_htlcs.push(msg)
 +                                                                                      },
 +                                                                              }
                                                                        },
 +                                                                      PendingHTLCStatus::Forward(forward_info) => {
 +                                                                              log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
 +                                                                              to_forward_infos.push((forward_info, htlc.htlc_id));
 +                                                                              htlc.state = InboundHTLCState::Committed;
 +                                                                      }
                                                                }
 -                                                      },
 -                                                      PendingHTLCStatus::Forward(forward_info) => {
 +                                                      InboundHTLCResolution::Pending { update_add_htlc } => {
                                                                log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
 -                                                              to_forward_infos.push((forward_info, htlc.htlc_id));
 +                                                              pending_update_adds.push(update_add_htlc);
                                                                htlc.state = InboundHTLCState::Committed;
                                                        }
                                                }
                        }
                }
  
 +              self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
 +
                if self.context.channel_state.is_monitor_update_in_progress() {
                        // We can't actually generate a new commitment transaction (including by freeing
                        // holding cells) while we can't update the monitor, so we just return what we have.
                // first received the funding_signed.
                let mut funding_broadcastable =
                        if self.context.is_outbound() &&
 -                              matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
 -                              matches!(self.context.channel_state, ChannelState::ChannelReady(_))
 +                              (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
 +                              matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
                        {
                                self.context.funding_transaction.take()
                        } else { None };
                mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
                let mut finalized_claimed_htlcs = Vec::new();
                mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
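 +              // Also take any update_add_htlcs that were deferred while the monitor update was in
 +              // flight so they can be handed back to the `ChannelManager`.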
 +              let mut pending_update_adds = Vec::new();
 +              mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
  
                if self.context.channel_state.is_peer_disconnected() {
                        self.context.monitor_pending_revoke_and_ack = false;
                        self.context.monitor_pending_commitment_signed = false;
                        return MonitorRestoreUpdates {
                                raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
 -                              accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
 +                              accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
 +                              funding_broadcastable, channel_ready, announcement_sigs
                        };
                }
  
                        if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
                MonitorRestoreUpdates {
 -                      raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
 +                      raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
 +                      pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
                }
        }
  
  
                let shutdown_msg = self.get_outbound_shutdown();
  
 -              let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
 +              let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
  
                if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
                        // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
 +                              channel_id: Some(self.context.channel_id()),
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
                        self.push_ret_blockable_mon_update(monitor_update)
                        }
                }
  
 +              let closure_reason = if self.initiated_shutdown() {
 +                      ClosureReason::LocallyInitiatedCooperativeClosure
 +              } else {
 +                      ClosureReason::CounterpartyInitiatedCooperativeClosure
 +              };
 +
                assert!(self.context.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
 -                                      closure_reason: ClosureReason::CooperativeClosure,
 +                                      closure_reason,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
 -                                                              closure_reason: ClosureReason::CooperativeClosure,
 +                                                              closure_reason,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
                        })
        }
  
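 +      /// Determines whether an incoming HTLC can be accepted without pushing our dust exposure
 +      /// over the configured limit on either commitment transaction or violating the remote's
 +      /// fee spike buffer; on failure, returns the error string and onion failure code to use.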
 +      pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
 +              &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
 +      ) -> Result<(), (&'static str, u16)>
 +      where
 +              F::Target: FeeEstimator,
 +              L::Target: Logger
 +      {
 +              if self.context.channel_state.is_local_shutdown_sent() {
 +                      return Err(("Shutdown was already sent", 0x4000|8))
 +              }
 +
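 +              // First, check that accepting this HTLC wouldn't push our dust exposure over the
 +              // configured limit on either commitment transaction.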
 +              let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
 +              let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
 +              let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
 +              let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                      (0, 0)
 +              } else {
 +                      let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
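 +                      // Feerates are in sat per 1000 weight units, so multiplying by the HTLC
 +                      // transaction weight and dividing by 1000 yields the dust threshold in sats.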
 +                      (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
 +                              dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
 +              };
 +              let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
 +              if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
 +                      let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
 +                      if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 +                              log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
 +                                      on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 +                              return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
 +                      }
 +              }
 +
 +              let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
 +              if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
 +                      let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
 +                      if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 +                              log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
 +                                      on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 +                              return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
 +                      }
 +              }
 +
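 +              // Anchor channels commit to two anchor outputs; account for their value (in msat)
 +              // when computing spendable balances below.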
 +              let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                      ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
 +              } else {
 +                      0
 +              };
 +
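 +              // Outbound HTLCs the counterparty has fulfilled but that aren't yet irrevocably
 +              // removed will ultimately debit our balance, so treat them as already settled.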
 +              let mut removed_outbound_total_msat = 0;
 +              for ref htlc in self.context.pending_outbound_htlcs.iter() {
 +                      if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
 +                              removed_outbound_total_msat += htlc.amount_msat;
 +                      } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
 +                              removed_outbound_total_msat += htlc.amount_msat;
 +                      }
 +              }
 +
 +              let pending_value_to_self_msat =
 +                      self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
 +              let pending_remote_value_msat =
 +                      self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
 +
 +              if !self.context.is_outbound() {
 +                      // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
 +                      // the spec because the fee spike buffer requirement doesn't exist on the receiver's
 +                      // side, only on the sender's. Note that with anchor outputs we are less sensitive to
 +                      // fee spikes, so we only apply the fee spike buffer multiple to non-anchor channels.
 +                      let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
 +                      let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
 +                      if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 +                              remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 +                      }
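 +                      // After adding this HTLC, the remote must still be able to cover their reserve,
 +                      // the anchor outputs, and the (buffered) commitment fee.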
 +                      if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
 +                              log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
 +                              return Err(("Fee spike buffer violation", 0x1000|7));
 +                      }
 +              }
 +
 +              Ok(())
 +      }
 +
        pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
                self.context.cur_holder_commitment_transaction_number + 1
        }
                }
        }
  
+       /// On startup, it's possible we detect some monitor updates have actually completed (and
+       /// the ChannelManager was simply stale). In that case, we should simply drop them, which we
+       /// do here after logging them.
+       pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
+               let channel_id = self.context.channel_id();
+               self.context.blocked_monitor_updates.retain(|update| {
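+               // Updates with an id at or below `loaded_mon_update_id` were already applied to the
+               // persisted `ChannelMonitor`; drop those and keep only the strictly newer ones.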
+                       if update.update.update_id <= loaded_mon_update_id {
+                               log_info!(
+                                       logger,
+                                       "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
+                                       update.update.update_id,
+                                       channel_id,
+                               );
+                               false
+                       } else {
+                               true
+                       }
+               });
+       }
 
        pub fn blocked_monitor_updates_pending(&self) -> usize {
                self.context.blocked_monitor_updates.len()
        }
                if !self.is_awaiting_monitor_update() { return false; }
                if matches!(
                        self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
 -                      if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
 +                      if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
                ) {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // AwaitingChannelReady set, though our peer could have sent their channel_ready.
                self.context.channel_state.is_local_shutdown_sent()
        }
  
 +      /// Returns true if we initiated the shutdown of this channel.
 +      pub fn initiated_shutdown(&self) -> bool {
 +              self.context.local_initiated_shutdown.is_some()
 +      }
 +
        /// Returns true if this channel is fully shut down. True here implies that no further actions
        /// may/will be taken on this channel, and thus this object should be freed. Any future changes
        /// will be handled appropriately by the chain monitor.
  
                // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
                // channel_ready until the entire batch is ready.
 -              let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
 +              let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
                        self.context.channel_state.set_our_channel_ready();
                        true
 -              } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
 +              } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
                        self.context.update_time_counter += 1;
                        true
 -              } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
 +              } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
  
 -              let need_holding_cell = self.context.channel_state.should_force_holding_cell();
 +              let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
                                feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
                                to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
                                to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
 -                      }]
 +                      }],
 +                      channel_id: Some(self.context.channel_id()),
                };
                self.context.channel_state.set_awaiting_remote_revoke();
                monitor_update
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
                self.context.channel_state.set_local_shutdown_sent();
 +              self.context.local_initiated_shutdown = Some(());
                self.context.update_time_counter += 1;
  
                let monitor_update = if update_shutdown_script {
                                updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                                        scriptpubkey: self.get_closing_scriptpubkey(),
                                }],
 +                              channel_id: Some(self.context.channel_id()),
                        };
                        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
                        self.push_ret_blockable_mon_update(monitor_update)
@@@ -7296,61 -6236,222 +7316,61 @@@ impl<SP: Deref> OutboundV1Channel<SP> w
        where ES::Target: EntropySource,
              F::Target: FeeEstimator
        {
 -              let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
 -              let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
 -              let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
 -              let pubkeys = holder_signer.pubkeys().clone();
 -
 -              if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
 -                      return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
 -              }
 -              if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
 -                      return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
 -              }
 -              let channel_value_msat = channel_value_satoshis * 1000;
 -              if push_msat > channel_value_msat {
 -                      return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
 -              }
 -              if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
 -                      return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
 -              }
                let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
                if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
                        // Protocol level safety check in place, although it should never happen because
                        // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
 -                      return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
 +                      return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
 +                              implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
                }
  
 -              let channel_type = Self::get_initial_channel_type(&config, their_features);
 -              debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
 +              let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
 +              let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
 +              let pubkeys = holder_signer.pubkeys().clone();
  
 -              let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
 -                      (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
 -              } else {
 -                      (ConfirmationTarget::NonAnchorChannelFee, 0)
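 +              // Handshake validation and channel-type selection previously inlined here now live in
 +              // `ChannelContext::new_for_outbound_channel`.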
 +              let chan = Self {
 +                      context: ChannelContext::new_for_outbound_channel(
 +                              fee_estimator,
 +                              entropy_source,
 +                              signer_provider,
 +                              counterparty_node_id,
 +                              their_features,
 +                              channel_value_satoshis,
 +                              push_msat,
 +                              user_id,
 +                              config,
 +                              current_chain_height,
 +                              outbound_scid_alias,
 +                              temporary_channel_id,
 +                              holder_selected_channel_reserve_satoshis,
 +                              channel_keys_id,
 +                              holder_signer,
 +                              pubkeys,
 +                      )?,
 +                      unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
 -              let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
 -
 -              let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
 -              let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
 -              if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
 -                      return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
 -              }
 -
 -              let mut secp_ctx = Secp256k1::new();
 -              secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 -
 -              let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
 -                      match signer_provider.get_shutdown_scriptpubkey() {
 -                              Ok(scriptpubkey) => Some(scriptpubkey),
 -                              Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
 -                      }
 -              } else { None };
 -
 -              if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
 -                      if !shutdown_scriptpubkey.is_compatible(&their_features) {
 -                              return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
 -                      }
 -              }
 +              Ok(chan)
 +      }
  
 -              let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
 -                      Ok(script) => script,
 -                      Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
 +      /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
 +      fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 +              let counterparty_keys = self.context.build_remote_transaction_keys();
 +              let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
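 +              // `funding_created` carries our signature over the counterparty's initial commitment
 +              // transaction; theirs over ours arrives later in `funding_signed`.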
 +              let signature = match &self.context.holder_signer {
 +                      // TODO (taproot|arik): move match into calling method for Taproot
 +                      ChannelSignerType::Ecdsa(ecdsa) => {
 +                              ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
 +                                      .map(|(sig, _)| sig).ok()?
 +                      },
 +                      // TODO (taproot|arik)
 +                      #[cfg(taproot)]
 +                      _ => todo!()
                };
  
 -              let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
 -
 -              Ok(Self {
 -                      context: ChannelContext {
 -                              user_id,
 -
 -                              config: LegacyChannelConfig {
 -                                      options: config.channel_config.clone(),
 -                                      announced_channel: config.channel_handshake_config.announced_channel,
 -                                      commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
 -                              },
 -
 -                              prev_config: None,
 -
 -                              inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
 -
 -                              channel_id: temporary_channel_id,
 -                              temporary_channel_id: Some(temporary_channel_id),
 -                              channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
 -                              announcement_sigs_state: AnnouncementSigsState::NotSent,
 -                              secp_ctx,
 -                              channel_value_satoshis,
 -
 -                              latest_monitor_update_id: 0,
 -
 -                              holder_signer: ChannelSignerType::Ecdsa(holder_signer),
 -                              shutdown_scriptpubkey,
 -                              destination_script,
 -
 -                              cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 -                              cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 -                              value_to_self_msat,
 -
 -                              pending_inbound_htlcs: Vec::new(),
 -                              pending_outbound_htlcs: Vec::new(),
 -                              holding_cell_htlc_updates: Vec::new(),
 -                              pending_update_fee: None,
 -                              holding_cell_update_fee: None,
 -                              next_holder_htlc_id: 0,
 -                              next_counterparty_htlc_id: 0,
 -                              update_time_counter: 1,
 -
 -                              resend_order: RAACommitmentOrder::CommitmentFirst,
 -
 -                              monitor_pending_channel_ready: false,
 -                              monitor_pending_revoke_and_ack: false,
 -                              monitor_pending_commitment_signed: false,
 -                              monitor_pending_forwards: Vec::new(),
 -                              monitor_pending_failures: Vec::new(),
 -                              monitor_pending_finalized_fulfills: Vec::new(),
 -
 -                              signer_pending_commitment_update: false,
 -                              signer_pending_funding: false,
 -
 -                              #[cfg(debug_assertions)]
 -                              holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 -                              #[cfg(debug_assertions)]
 -                              counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 -
 -                              last_sent_closing_fee: None,
 -                              pending_counterparty_closing_signed: None,
 -                              expecting_peer_commitment_signed: false,
 -                              closing_fee_limits: None,
 -                              target_closing_feerate_sats_per_kw: None,
 -
 -                              funding_tx_confirmed_in: None,
 -                              funding_tx_confirmation_height: 0,
 -                              short_channel_id: None,
 -                              channel_creation_height: current_chain_height,
 -
 -                              feerate_per_kw: commitment_feerate,
 -                              counterparty_dust_limit_satoshis: 0,
 -                              holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
 -                              counterparty_max_htlc_value_in_flight_msat: 0,
 -                              holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
 -                              counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
 -                              holder_selected_channel_reserve_satoshis,
 -                              counterparty_htlc_minimum_msat: 0,
 -                              holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 -                              counterparty_max_accepted_htlcs: 0,
 -                              holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 -                              minimum_depth: None, // Filled in in accept_channel
 -
 -                              counterparty_forwarding_info: None,
 -
 -                              channel_transaction_parameters: ChannelTransactionParameters {
 -                                      holder_pubkeys: pubkeys,
 -                                      holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
 -                                      is_outbound_from_holder: true,
 -                                      counterparty_parameters: None,
 -                                      funding_outpoint: None,
 -                                      channel_type_features: channel_type.clone()
 -                              },
 -                              funding_transaction: None,
 -                              is_batch_funding: None,
 -
 -                              counterparty_cur_commitment_point: None,
 -                              counterparty_prev_commitment_point: None,
 -                              counterparty_node_id,
 -
 -                              counterparty_shutdown_scriptpubkey: None,
 -
 -                              commitment_secrets: CounterpartyCommitmentSecrets::new(),
 -
 -                              channel_update_status: ChannelUpdateStatus::Enabled,
 -                              closing_signed_in_flight: false,
 -
 -                              announcement_sigs: None,
 -
 -                              #[cfg(any(test, fuzzing))]
 -                              next_local_commitment_tx_fee_info_cached: Mutex::new(None),
 -                              #[cfg(any(test, fuzzing))]
 -                              next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 -
 -                              workaround_lnd_bug_4006: None,
 -                              sent_message_awaiting_response: None,
 -
 -                              latest_inbound_scid_alias: None,
 -                              outbound_scid_alias,
 -
 -                              channel_pending_event_emitted: false,
 -                              channel_ready_event_emitted: false,
 -
 -                              #[cfg(any(test, fuzzing))]
 -                              historical_inbound_htlc_fulfills: HashSet::new(),
 -
 -                              channel_type,
 -                              channel_keys_id,
 -
 -                              blocked_monitor_updates: Vec::new(),
 -                      },
 -                      unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
 -              })
 -      }
 -
 -      /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
 -      fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 -              let counterparty_keys = self.context.build_remote_transaction_keys();
 -              let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
 -              let signature = match &self.context.holder_signer {
 -                      // TODO (taproot|arik): move match into calling method for Taproot
 -                      ChannelSignerType::Ecdsa(ecdsa) => {
 -                              ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
 -                                      .map(|(sig, _)| sig).ok()?
 -                      },
 -                      // TODO (taproot|arik)
 -                      #[cfg(taproot)]
 -                      _ => todo!()
 -              };
 -
 -              if self.context.signer_pending_funding {
 -                      log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
 -                      self.context.signer_pending_funding = false;
 -              }
 +              if self.context.signer_pending_funding {
 +                      log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
 +                      self.context.signer_pending_funding = false;
 +              }
  
                Some(msgs::FundingCreated {
                        temporary_channel_id: self.context.temporary_channel_id.unwrap(),
                // Now that we're past error-generating stuff, update our local state:
  
                self.context.channel_state = ChannelState::FundingNegotiated;
 -              self.context.channel_id = funding_txo.to_channel_id();
 +              self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
  
                // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
                // We can skip this if it is a zero-conf channel.
                Ok(funding_created)
        }
  
 -      fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
 -              // The default channel type (ie the first one we try) depends on whether the channel is
 -              // public - if it is, we just go with `only_static_remotekey` as it's the only option
 -              // available. If it's private, we first try `scid_privacy` as it provides better privacy
 -              // with no other changes, and fall back to `only_static_remotekey`.
 -              let mut ret = ChannelTypeFeatures::only_static_remote_key();
 -              if !config.channel_handshake_config.announced_channel &&
 -                      config.channel_handshake_config.negotiate_scid_privacy &&
 -                      their_features.supports_scid_privacy() {
 -                      ret.set_scid_privacy_required();
 -              }
 -
 -              // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
 -              // set it now. If they don't understand it, we'll fall back to our default of
 -              // `only_static_remotekey`.
 -              if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
 -                      their_features.supports_anchors_zero_fee_htlc_tx() {
 -                      ret.set_anchors_zero_fee_htlc_tx_required();
 -              }
 -
 -              ret
 -      }
 -
        /// If we receive an error message, it may only be a rejection of the channel type we tried,
        /// not of our ability to open any channel at all. Thus, on error, we should first call this
        /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
        where
                F::Target: FeeEstimator
        {
 -              if !self.context.is_outbound() ||
 -                      !matches!(
 -                              self.context.channel_state, ChannelState::NegotiatingFunding(flags)
 -                              if flags == NegotiatingFundingFlags::OUR_INIT_SENT
 -                      )
 -              {
 -                      return Err(());
 -              }
 -              if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
 -                      // We've exhausted our options
 -                      return Err(());
 -              }
 -              // We support opening a few different types of channels. Try removing our additional
 -              // features one by one until we've either arrived at our default or the counterparty has
 -              // accepted one.
 -              //
 -              // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
 -              // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
 -              // checks whether the counterparty supports every feature, this would only happen if the
 -              // counterparty is advertising the feature, but rejecting channels proposing the feature for
 -              // whatever reason.
 -              if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
 -                      self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
 -                      self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
 -                      assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
 -              } else if self.context.channel_type.supports_scid_privacy() {
 -                      self.context.channel_type.clear_scid_privacy();
 -              } else {
 -                      self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
 -              }
 -              self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
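 +              // The feature-downgrade logic removed above (drop anchors first, then scid_privacy,
 +              // then fall back to `static_remote_key`) now lives in
 +              // `ChannelContext::maybe_downgrade_channel_features`.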
 +              self.context.maybe_downgrade_channel_features(fee_estimator)?;
                Ok(self.get_open_channel(chain_hash))
        }
  
                let keys = self.context.get_holder_pubkeys();
  
                msgs::OpenChannel {
 -                      chain_hash,
 -                      temporary_channel_id: self.context.channel_id,
 -                      funding_satoshis: self.context.channel_value_satoshis,
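 +                      // Fields shared between the V1 and V2 open-channel messages are grouped into
 +                      // `CommonOpenChannelFields`; V1-only fields remain at the top level.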
 +                      common_fields: msgs::CommonOpenChannelFields {
 +                              chain_hash,
 +                              temporary_channel_id: self.context.channel_id,
 +                              funding_satoshis: self.context.channel_value_satoshis,
 +                              dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 +                              max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
 +                              htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 +                              commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
 +                              to_self_delay: self.context.get_holder_selected_contest_delay(),
 +                              max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 +                              funding_pubkey: keys.funding_pubkey,
 +                              revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 +                              payment_basepoint: keys.payment_point,
 +                              delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 +                              htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 +                              first_per_commitment_point,
 +                              channel_flags: if self.context.config.announced_channel {1} else {0},
 +                              shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 +                                      Some(script) => script.clone().into_inner(),
 +                                      None => Builder::new().into_script(),
 +                              }),
 +                              channel_type: Some(self.context.channel_type.clone()),
 +                      },
                        push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
 -                      dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 -                      max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
                        channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
 -                      htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 -                      feerate_per_kw: self.context.feerate_per_kw as u32,
 -                      to_self_delay: self.context.get_holder_selected_contest_delay(),
 -                      max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 -                      funding_pubkey: keys.funding_pubkey,
 -                      revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 -                      payment_point: keys.payment_point,
 -                      delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 -                      htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 -                      first_per_commitment_point,
 -                      channel_flags: if self.context.config.announced_channel {1} else {0},
 -                      shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 -                              Some(script) => script.clone().into_inner(),
 -                              None => Builder::new().into_script(),
 -                      }),
 -                      channel_type: Some(self.context.channel_type.clone()),
                }
        }
  
                if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
                        return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
                }
 -              if msg.dust_limit_satoshis > 21000000 * 100000000 {
 -                      return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
 +              if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
 +                      return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
                }
                if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
                        return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
                }
 -              if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
 -                      return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
 +              if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
 +                      return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
                }
                if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
                        return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
                                msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
                }
                let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
 -              if msg.htlc_minimum_msat >= full_channel_value_msat {
 -                      return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
 +              if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
 +                      return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
                }
                let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
 -              if msg.to_self_delay > max_delay_acceptable {
 -                      return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
 +              if msg.common_fields.to_self_delay > max_delay_acceptable {
 +                      return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
                }
 -              if msg.max_accepted_htlcs < 1 {
 +              if msg.common_fields.max_accepted_htlcs < 1 {
                        return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
                }
 -              if msg.max_accepted_htlcs > MAX_HTLCS {
 -                      return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
 +              if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
 +                      return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
                }
  
                // Now check against optional parameters as set by config...
 -              if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
 -                      return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
 +              if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
 +                      return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
                }
 -              if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
 -                      return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
 +              if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
 +                      return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
                }
                if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
                        return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
                }
 -              if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
 -                      return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
 +              if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
 +                      return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
                }
 -              if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 -                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
 +              if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 +                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
                }
 -              if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
 -                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
 +              if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
 +                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
                }
 -              if msg.minimum_depth > peer_limits.max_minimum_depth {
 -                      return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
 +              if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
 +                      return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
                }
  
 -              if let Some(ty) = &msg.channel_type {
 +              if let Some(ty) = &msg.common_fields.channel_type {
                        if *ty != self.context.channel_type {
                                return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
                        }
                }
  
                let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
 -                      match &msg.shutdown_scriptpubkey {
 +                      match &msg.common_fields.shutdown_scriptpubkey {
                                &Some(ref script) => {
                                       // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
                                        if script.len() == 0 {
                        }
                } else { None };
  
 -              self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
 -              self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
 +              self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
 +              self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
                self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
 -              self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
 -              self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
 +              self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
 +              self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
  
                if peer_limits.trust_own_funding_0conf {
 -                      self.context.minimum_depth = Some(msg.minimum_depth);
 +                      self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
                } else {
 -                      self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
 +                      self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
                }
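// A hedged sketch of the clamp above, extracted for clarity (names are
// illustrative, not part of this patch): a counterparty-announced minimum_depth
// of 0 is only honored when we trust our own funding transaction.
//
//     fn effective_minimum_depth(msg_minimum_depth: u32, trust_own_funding_0conf: bool) -> u32 {
//         if trust_own_funding_0conf {
//             msg_minimum_depth // may be 0: we funded the channel ourselves
//         } else {
//             core::cmp::max(1, msg_minimum_depth) // never accept a peer-set depth of 0
//         }
//     }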
  
                let counterparty_pubkeys = ChannelPublicKeys {
 -                      funding_pubkey: msg.funding_pubkey,
 -                      revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
 -                      payment_point: msg.payment_point,
 -                      delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
 -                      htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
 +                      funding_pubkey: msg.common_fields.funding_pubkey,
 +                      revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
 +                      payment_point: msg.common_fields.payment_basepoint,
 +                      delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
 +                      htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
                };
  
                self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
 -                      selected_contest_delay: msg.to_self_delay,
 +                      selected_contest_delay: msg.common_fields.to_self_delay,
                        pubkeys: counterparty_pubkeys,
                });
  
 -              self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
 +              self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
                self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
  
                self.context.channel_state = ChannelState::NegotiatingFunding(
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
 -                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id);
 +                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_bitcoin_tx.txid, Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number,
                        self.context.counterparty_cur_commitment_point.unwrap(),
                        counterparty_initial_commitment_tx.feerate_per_kw(),
 -                      counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
 -                      counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
 -
 -              assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
 -              if self.context.is_batch_funding() {
 -                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
 -              } else {
 -                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 -              }
 -              self.context.cur_holder_commitment_transaction_number -= 1;
 -              self.context.cur_counterparty_commitment_transaction_number -= 1;
 -
 -              log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
 -
 -              let mut channel = Channel { context: self.context };
 -
 -              let need_channel_ready = channel.check_get_channel_ready(0).is_some();
 -              channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 -              Ok((channel, channel_monitor))
 -      }
 -
 -      /// Indicates that the signer may have some signatures for us, so we should retry if we're
 -      /// blocked.
 -      #[cfg(async_signing)]
 -      pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 -              if self.context.signer_pending_funding && self.context.is_outbound() {
 -                      log_trace!(logger, "Signer unblocked a funding_created");
 -                      self.get_funding_created_msg(logger)
 -              } else { None }
 -      }
 -}
 -
 -/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
 -pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
 -      pub context: ChannelContext<SP>,
 -      pub unfunded_context: UnfundedChannelContext,
 -}
 -
 -/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
 -/// [`msgs::OpenChannel`].
 -pub(super) fn channel_type_from_open_channel(
 -      msg: &msgs::OpenChannel, their_features: &InitFeatures,
 -      our_supported_features: &ChannelTypeFeatures
 -) -> Result<ChannelTypeFeatures, ChannelError> {
 -      if let Some(channel_type) = &msg.channel_type {
 -              if channel_type.supports_any_optional_bits() {
 -                      return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
 -              }
 -
 -              // We only support the channel types defined by the `ChannelManager` in
 -              // `provided_channel_type_features`. The channel type must always support
 -              // `static_remote_key`.
 -              if !channel_type.requires_static_remote_key() {
 -                      return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
 -              }
 -              // Make sure we support all of the features behind the channel type.
 -              if !channel_type.is_subset(our_supported_features) {
 -                      return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
 -              }
 -              let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
 -              if channel_type.requires_scid_privacy() && announced_channel {
 -                      return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
 -              }
 -              Ok(channel_type.clone())
 -      } else {
 -              let channel_type = ChannelTypeFeatures::from_init(&their_features);
 -              if channel_type != ChannelTypeFeatures::only_static_remote_key() {
 -                      return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
 -              }
 -              Ok(channel_type)
 -      }
 -}
 -
 -impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 -      /// Creates a new channel from a remote sides' request for one.
 -      /// Assumes chain_hash has already been checked and corresponds with what we expect!
 -      pub fn new<ES: Deref, F: Deref, L: Deref>(
 -              fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
 -              counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
 -              their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
 -              current_chain_height: u32, logger: &L, is_0conf: bool,
 -      ) -> Result<InboundV1Channel<SP>, ChannelError>
 -              where ES::Target: EntropySource,
 -                        F::Target: FeeEstimator,
 -                        L::Target: Logger,
 -      {
 -              let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
 -              let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
 -
 -              // First check the channel type is known, failing before we do anything else if we don't
 -              // support this channel type.
 -              let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
 -
 -              let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
 -              let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
 -              let pubkeys = holder_signer.pubkeys().clone();
 -              let counterparty_pubkeys = ChannelPublicKeys {
 -                      funding_pubkey: msg.funding_pubkey,
 -                      revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
 -                      payment_point: msg.payment_point,
 -                      delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
 -                      htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
 -              };
 -
 -              if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
 -                      return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
 -              }
 -
 -              // Check sanity of message fields:
 -              if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
 -                      return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
 -              }
 -              if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
 -                      return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
 -              }
 -              if msg.channel_reserve_satoshis > msg.funding_satoshis {
 -                      return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
 -              }
 -              let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
 -              if msg.push_msat > full_channel_value_msat {
 -                      return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
 -              }
 -              if msg.dust_limit_satoshis > msg.funding_satoshis {
 -                      return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
 -              }
 -              if msg.htlc_minimum_msat >= full_channel_value_msat {
 -                      return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
 -              }
 -              Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
 -
 -              let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
 -              if msg.to_self_delay > max_counterparty_selected_contest_delay {
 -                      return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
 -              }
 -              if msg.max_accepted_htlcs < 1 {
 -                      return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
 -              }
 -              if msg.max_accepted_htlcs > MAX_HTLCS {
 -                      return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
 -              }
 -
 -              // Now check against optional parameters as set by config...
 -              if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
 -                      return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
 -              }
 -              if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
 -                      return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
 -              }
 -              if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
 -                      return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
 -              }
 -              if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
 -                      return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
 -              }
 -              if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
 -                      return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
 -              }
 -              if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 -                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
 -              }
 -              if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
 -                      return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
 -              }
 -
 -              // Convert things into internal flags and prep our state:
 -
 -              if config.channel_handshake_limits.force_announced_channel_preference {
 -                      if config.channel_handshake_config.announced_channel != announced_channel {
 -                              return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
 -                      }
 -              }
 -
 -              let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
 -              if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 -                      // Protocol level safety check in place, although it should never happen because
 -                      // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
 -                      return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
 -              }
 -              if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
 -                      return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
 -              }
 -              if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
 -                      log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
 -                              msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
 -              }
 -              if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
 -                      return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
 -              }
 -
 -              // check if the funder's amount for the initial commitment tx is sufficient
 -              // for full fee payment plus a few HTLCs to ensure the channel will be useful.
 -              let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
 -                      ANCHOR_OUTPUT_VALUE_SATOSHI * 2
 -              } else {
 -                      0
 -              };
 -              let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
 -              let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
 -              if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
 -                      return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
 -              }
 -
 -              let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
 -              // While it's reasonable for us to not meet the channel reserve initially (if they don't
 -              // want to push much to us), our counterparty should always have more than our reserve.
 -              if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
 -                      return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
 -              }
 -
 -              let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
 -                      match &msg.shutdown_scriptpubkey {
 -                              &Some(ref script) => {
 -                                      // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
 -                                      if script.len() == 0 {
 -                                              None
 -                                      } else {
 -                                              if !script::is_bolt2_compliant(&script, their_features) {
 -                                                      return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
 -                                              }
 -                                              Some(script.clone())
 -                                      }
 -                              },
 -                              // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
 -                              &None => {
 -                                      return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
 -                              }
 -                      }
 -              } else { None };
 -
 -              let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
 -                      match signer_provider.get_shutdown_scriptpubkey() {
 -                              Ok(scriptpubkey) => Some(scriptpubkey),
 -                              Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
 -                      }
 -              } else { None };
 -
 -              if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
 -                      if !shutdown_scriptpubkey.is_compatible(&their_features) {
 -                              return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
 -                      }
 -              }
 -
 -              let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
 -                      Ok(script) => script,
 -                      Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
 -              };
 -
 -              let mut secp_ctx = Secp256k1::new();
 -              secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 -
 -              let minimum_depth = if is_0conf {
 -                      Some(0)
 -              } else {
 -                      Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
 -              };
 -
 -              let chan = Self {
 -                      context: ChannelContext {
 -                              user_id,
 -
 -                              config: LegacyChannelConfig {
 -                                      options: config.channel_config.clone(),
 -                                      announced_channel,
 -                                      commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
 -                              },
 -
 -                              prev_config: None,
 -
 -                              inbound_handshake_limits_override: None,
 -
 -                              temporary_channel_id: Some(msg.temporary_channel_id),
 -                              channel_id: msg.temporary_channel_id,
 -                              channel_state: ChannelState::NegotiatingFunding(
 -                                      NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
 -                              ),
 -                              announcement_sigs_state: AnnouncementSigsState::NotSent,
 -                              secp_ctx,
 -
 -                              latest_monitor_update_id: 0,
 -
 -                              holder_signer: ChannelSignerType::Ecdsa(holder_signer),
 -                              shutdown_scriptpubkey,
 -                              destination_script,
 -
 -                              cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 -                              cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 -                              value_to_self_msat: msg.push_msat,
 -
 -                              pending_inbound_htlcs: Vec::new(),
 -                              pending_outbound_htlcs: Vec::new(),
 -                              holding_cell_htlc_updates: Vec::new(),
 -                              pending_update_fee: None,
 -                              holding_cell_update_fee: None,
 -                              next_holder_htlc_id: 0,
 -                              next_counterparty_htlc_id: 0,
 -                              update_time_counter: 1,
 -
 -                              resend_order: RAACommitmentOrder::CommitmentFirst,
 -
 -                              monitor_pending_channel_ready: false,
 -                              monitor_pending_revoke_and_ack: false,
 -                              monitor_pending_commitment_signed: false,
 -                              monitor_pending_forwards: Vec::new(),
 -                              monitor_pending_failures: Vec::new(),
 -                              monitor_pending_finalized_fulfills: Vec::new(),
 -
 -                              signer_pending_commitment_update: false,
 -                              signer_pending_funding: false,
 -
 -                              #[cfg(debug_assertions)]
 -                              holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 -                              #[cfg(debug_assertions)]
 -                              counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 -
 -                              last_sent_closing_fee: None,
 -                              pending_counterparty_closing_signed: None,
 -                              expecting_peer_commitment_signed: false,
 -                              closing_fee_limits: None,
 -                              target_closing_feerate_sats_per_kw: None,
 -
 -                              funding_tx_confirmed_in: None,
 -                              funding_tx_confirmation_height: 0,
 -                              short_channel_id: None,
 -                              channel_creation_height: current_chain_height,
 -
 -                              feerate_per_kw: msg.feerate_per_kw,
 -                              channel_value_satoshis: msg.funding_satoshis,
 -                              counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
 -                              holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
 -                              counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
 -                              holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
 -                              counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
 -                              holder_selected_channel_reserve_satoshis,
 -                              counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
 -                              holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 -                              counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
 -                              holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 -                              minimum_depth,
 +                      counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
 +                      counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
  
 -                              counterparty_forwarding_info: None,
 +              assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet to fail an update!
 +              if self.context.is_batch_funding() {
 +                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
 +              } else {
 +                      self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 +              }
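// Context for the branch above (hedged): batch-funded channels keep the
// WAITING_FOR_BATCH flag set so that, presumably, readiness is not signaled
// until every channel sharing the batch funding transaction has been persisted;
// single channels start AwaitingChannelReady with empty flags.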
 +              self.context.cur_holder_commitment_transaction_number -= 1;
 +              self.context.cur_counterparty_commitment_transaction_number -= 1;
  
 -                              channel_transaction_parameters: ChannelTransactionParameters {
 -                                      holder_pubkeys: pubkeys,
 -                                      holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
 -                                      is_outbound_from_holder: false,
 -                                      counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
 -                                              selected_contest_delay: msg.to_self_delay,
 -                                              pubkeys: counterparty_pubkeys,
 -                                      }),
 -                                      funding_outpoint: None,
 -                                      channel_type_features: channel_type.clone()
 -                              },
 -                              funding_transaction: None,
 -                              is_batch_funding: None,
 +              log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
  
 -                              counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
 -                              counterparty_prev_commitment_point: None,
 -                              counterparty_node_id,
 +              let mut channel = Channel {
 +                      context: self.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      dual_funding_channel_context: None,
 +              };
  
 -                              counterparty_shutdown_scriptpubkey,
 +              let need_channel_ready = channel.check_get_channel_ready(0).is_some();
 +              channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
 +              Ok((channel, channel_monitor))
 +      }
  
 -                              commitment_secrets: CounterpartyCommitmentSecrets::new(),
 +      /// Indicates that the signer may have some signatures for us, so we should retry if we're
 +      /// blocked.
 +      #[cfg(async_signing)]
 +      pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
 +              if self.context.signer_pending_funding && self.context.is_outbound() {
 +                      log_trace!(logger, "Signer unblocked a funding_created");
 +                      self.get_funding_created_msg(logger)
 +              } else { None }
 +      }
 +}
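// A hedged caller-side sketch of the async-signing retry hook above; the
// delivery helper `queue_funding_created` is illustrative, not an LDK API:
//
//     #[cfg(async_signing)]
//     if let Some(funding_created) = chan.signer_maybe_unblocked(&logger) {
//         queue_funding_created(counterparty_node_id, funding_created);
//     }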
  
 -                              channel_update_status: ChannelUpdateStatus::Enabled,
 -                              closing_signed_in_flight: false,
 +/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
 +pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
 +      pub context: ChannelContext<SP>,
 +      pub unfunded_context: UnfundedChannelContext,
 +}
  
 -                              announcement_sigs: None,
 +/// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
 +/// [`msgs::CommonOpenChannelFields`].
 +pub(super) fn channel_type_from_open_channel(
 +      common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
 +      our_supported_features: &ChannelTypeFeatures
 +) -> Result<ChannelTypeFeatures, ChannelError> {
 +      if let Some(channel_type) = &common_fields.channel_type {
 +              if channel_type.supports_any_optional_bits() {
 +                      return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
 +              }
  
 -                              #[cfg(any(test, fuzzing))]
 -                              next_local_commitment_tx_fee_info_cached: Mutex::new(None),
 -                              #[cfg(any(test, fuzzing))]
 -                              next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
 +              // We only support the channel types defined by the `ChannelManager` in
 +              // `provided_channel_type_features`. The channel type must always support
 +              // `static_remote_key`.
 +              if !channel_type.requires_static_remote_key() {
 +                      return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
 +              }
 +              // Make sure we support all of the features behind the channel type.
 +              if !channel_type.is_subset(our_supported_features) {
 +                      return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
 +              }
 +              let announced_channel = (common_fields.channel_flags & 1) == 1;
 +              if channel_type.requires_scid_privacy() && announced_channel {
 +                      return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
 +              }
 +              Ok(channel_type.clone())
 +      } else {
 +              let channel_type = ChannelTypeFeatures::from_init(&their_features);
 +              if channel_type != ChannelTypeFeatures::only_static_remote_key() {
 +                      return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
 +              }
 +              Ok(channel_type)
 +      }
 +}
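// A hedged usage sketch: both the V1 and V2 inbound paths in this patch funnel
// through the same helper, so a `ChannelError::Close` here rejects the open
// before any channel state is allocated:
//
//     let channel_type = channel_type_from_open_channel(
//         &msg.common_fields, their_features, our_supported_features,
//     )?;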
  
 -                              workaround_lnd_bug_4006: None,
 -                              sent_message_awaiting_response: None,
 +impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 +      /// Creates a new channel from a remote side's request for one.
 +      /// Assumes chain_hash has already been checked and corresponds with what we expect!
 +      pub fn new<ES: Deref, F: Deref, L: Deref>(
 +              fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
 +              counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
 +              their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
 +              current_chain_height: u32, logger: &L, is_0conf: bool,
 +      ) -> Result<InboundV1Channel<SP>, ChannelError>
 +              where ES::Target: EntropySource,
 +                        F::Target: FeeEstimator,
 +                        L::Target: Logger,
 +      {
 +              let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
  
 -                              latest_inbound_scid_alias: None,
 -                              outbound_scid_alias: 0,
 +              // First check the channel type is known, failing before we do anything else if we don't
 +              // support this channel type.
 +              let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
  
 -                              channel_pending_event_emitted: false,
 -                              channel_ready_event_emitted: false,
 +              let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
 +              let counterparty_pubkeys = ChannelPublicKeys {
 +                      funding_pubkey: msg.common_fields.funding_pubkey,
 +                      revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
 +                      payment_point: msg.common_fields.payment_basepoint,
 +                      delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
 +                      htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
 +              };
  
 -                              #[cfg(any(test, fuzzing))]
 -                              historical_inbound_htlc_fulfills: HashSet::new(),
 +              let chan = Self {
 +                      context: ChannelContext::new_for_inbound_channel(
 +                              fee_estimator,
 +                              entropy_source,
 +                              signer_provider,
 +                              counterparty_node_id,
 +                              their_features,
 +                              user_id,
 +                              config,
 +                              current_chain_height,
 +                              &&logger,
 +                              is_0conf,
 +                              0,
  
 +                              counterparty_pubkeys,
                                channel_type,
 -                              channel_keys_id,
 -
 -                              blocked_monitor_updates: Vec::new(),
 -                      },
 +                              holder_selected_channel_reserve_satoshis,
 +                              msg.channel_reserve_satoshis,
 +                              msg.push_msat,
 +                              msg.common_fields.clone(),
 +                      )?,
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                };
 -
                Ok(chan)
        }
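// A hedged caller-side sketch of the constructor above (argument order from the
// signature; concrete values are illustrative):
//
//     let inbound = InboundV1Channel::new(
//         &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
//         &our_supported_features, &their_features, &open_channel_msg, user_channel_id,
//         &config, best_block_height, &logger, /* is_0conf */ false,
//     )?;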
  
                let keys = self.context.get_holder_pubkeys();
  
                msgs::AcceptChannel {
 -                      temporary_channel_id: self.context.channel_id,
 -                      dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 -                      max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
 +                      common_fields: msgs::CommonAcceptChannelFields {
 +                              temporary_channel_id: self.context.channel_id,
 +                              dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 +                              max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
 +                              htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 +                              minimum_depth: self.context.minimum_depth.unwrap(),
 +                              to_self_delay: self.context.get_holder_selected_contest_delay(),
 +                              max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 +                              funding_pubkey: keys.funding_pubkey,
 +                              revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 +                              payment_basepoint: keys.payment_point,
 +                              delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 +                              htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 +                              first_per_commitment_point,
 +                              shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 +                                      Some(script) => script.clone().into_inner(),
 +                                      None => Builder::new().into_script(),
 +                              }),
 +                              channel_type: Some(self.context.channel_type.clone()),
 +                      },
                        channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
 -                      htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 -                      minimum_depth: self.context.minimum_depth.unwrap(),
 -                      to_self_delay: self.context.get_holder_selected_contest_delay(),
 -                      max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 -                      funding_pubkey: keys.funding_pubkey,
 -                      revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 -                      payment_point: keys.payment_point,
 -                      delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 -                      htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 -                      first_per_commitment_point,
 -                      shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 -                              Some(script) => script.clone().into_inner(),
 -                              None => Builder::new().into_script(),
 -                      }),
 -                      channel_type: Some(self.context.channel_type.clone()),
                        #[cfg(taproot)]
                        next_local_nonce: None,
                }
                // Now that we're past error-generating stuff, update our local state:
  
                self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 -              self.context.channel_id = funding_txo.to_channel_id();
 +              self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
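// For reference, BOLT 2 derives the v1 channel_id by XORing the funding output
// index into the last two bytes of the funding txid, which is what
// `ChannelId::v1_from_funding_outpoint` computes; a hedged sketch:
//
//     let mut id = [0u8; 32];
//     id.copy_from_slice(&funding_txo.txid[..]);
//     id[30] ^= (funding_txo.index >> 8) as u8;
//     id[31] ^= (funding_txo.index & 0xff) as u8;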
                self.context.cur_counterparty_commitment_transaction_number -= 1;
                self.context.cur_holder_commitment_transaction_number -= 1;
  
                                                          &self.context.channel_transaction_parameters,
                                                          funding_redeemscript.clone(), self.context.channel_value_satoshis,
                                                          obscure_factor,
 -                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id);
 +                                                        holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
                channel_monitor.provide_initial_counterparty_commitment_tx(
                        counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
                        self.context.cur_counterparty_commitment_transaction_number + 1,
                // `ChannelMonitor`.
                let mut channel = Channel {
                        context: self.context,
 +                      #[cfg(any(dual_funding, splicing))]
 +                      dual_funding_channel_context: None,
                };
                let need_channel_ready = channel.check_get_channel_ready(0).is_some();
                channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
        }
  }
  
 -const SERIALIZATION_VERSION: u8 = 3;
 +/// A not-yet-funded outbound (from holder) channel using V2 channel establishment.
 +#[cfg(any(dual_funding, splicing))]
 +pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
 +      pub context: ChannelContext<SP>,
 +      pub unfunded_context: UnfundedChannelContext,
 +      #[cfg(any(dual_funding, splicing))]
 +      pub dual_funding_context: DualFundingChannelContext,
 +}
 +
 +#[cfg(any(dual_funding, splicing))]
 +impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 +      pub fn new<ES: Deref, F: Deref>(
 +              fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
 +              counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
 +              user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
 +              funding_confirmation_target: ConfirmationTarget,
 +      ) -> Result<OutboundV2Channel<SP>, APIError>
 +      where ES::Target: EntropySource,
 +            F::Target: FeeEstimator,
 +      {
 +              let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
 +              let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
 +              let pubkeys = holder_signer.pubkeys().clone();
 +
 +              let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
 +
 +              let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
 +                      funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
 +
 +              let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
 +              let funding_tx_locktime = current_chain_height;
 +
 +              let chan = Self {
 +                      context: ChannelContext::new_for_outbound_channel(
 +                              fee_estimator,
 +                              entropy_source,
 +                              signer_provider,
 +                              counterparty_node_id,
 +                              their_features,
 +                              funding_satoshis,
 +                              0,
 +                              user_id,
 +                              config,
 +                              current_chain_height,
 +                              outbound_scid_alias,
 +                              temporary_channel_id,
 +                              holder_selected_channel_reserve_satoshis,
 +                              channel_keys_id,
 +                              holder_signer,
 +                              pubkeys,
 +                      )?,
 +                      unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
 +                      dual_funding_context: DualFundingChannelContext {
 +                              our_funding_satoshis: funding_satoshis,
 +                              their_funding_satoshis: 0,
 +                              funding_tx_locktime,
 +                              funding_feerate_sat_per_1000_weight,
 +                      }
 +              };
 +              Ok(chan)
 +      }
 +
 +      /// If we receive an error message, it may only be a rejection of the channel type we tried,
 +      /// not of our ability to open any channel at all. Thus, on error, we should first call this
 +      /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
 +      pub(crate) fn maybe_handle_error_without_close<F: Deref>(
 +              &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
 +      ) -> Result<msgs::OpenChannelV2, ()>
 +      where
 +              F::Target: FeeEstimator
 +      {
 +              self.context.maybe_downgrade_channel_features(fee_estimator)?;
 +              Ok(self.get_open_channel_v2(chain_hash))
 +      }
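// A hedged caller-side sketch: on receiving a peer `error` for this pending
// channel, attempt a feature downgrade and retry before failing outright
// (the surrounding plumbing is illustrative):
//
//     match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
//         Ok(retry_msg) => { /* re-send the downgraded open_channel2 to the peer */ }
//         Err(()) => { /* nothing left to downgrade; fail the channel */ }
//     }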
 +
 +      pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
 +              if self.context.have_received_message() {
 +                      debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
 +              }
 +
 +              if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
 +                      debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
 +              }
 +
 +              let first_per_commitment_point = self.context.holder_signer.as_ref()
 +                      .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
 +                              &self.context.secp_ctx);
 +              let second_per_commitment_point = self.context.holder_signer.as_ref()
 +                      .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
 +                              &self.context.secp_ctx);
 +              let keys = self.context.get_holder_pubkeys();
 +
 +              msgs::OpenChannelV2 {
 +                      common_fields: msgs::CommonOpenChannelFields {
 +                              chain_hash,
 +                              temporary_channel_id: self.context.temporary_channel_id.unwrap(),
 +                              funding_satoshis: self.context.channel_value_satoshis,
 +                              dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 +                              max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
 +                              htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 +                              commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
 +                              to_self_delay: self.context.get_holder_selected_contest_delay(),
 +                              max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 +                              funding_pubkey: keys.funding_pubkey,
 +                              revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 +                              payment_basepoint: keys.payment_point,
 +                              delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 +                              htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 +                              first_per_commitment_point,
 +                              channel_flags: if self.context.config.announced_channel { 1 } else { 0 },
 +                              shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 +                                      Some(script) => script.clone().into_inner(),
 +                                      None => Builder::new().into_script(),
 +                              }),
 +                              channel_type: Some(self.context.channel_type.clone()),
 +                      },
 +                      funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
 +                      second_per_commitment_point,
 +                      locktime: self.dual_funding_context.funding_tx_locktime,
 +                      require_confirmed_inputs: None,
 +              }
 +      }
 +}
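// A hedged usage sketch of the getter above; `ChainHash::using_genesis_block`
// and `Network` come from the `bitcoin` crate:
//
//     let open_msg = chan.get_open_channel_v2(ChainHash::using_genesis_block(Network::Bitcoin));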
 +
 +/// A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
 +#[cfg(any(dual_funding, splicing))]
 +pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
 +      pub context: ChannelContext<SP>,
 +      pub unfunded_context: UnfundedChannelContext,
 +      pub dual_funding_context: DualFundingChannelContext,
 +}
 +
 +#[cfg(any(dual_funding, splicing))]
 +impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
 +      /// Creates a new dual-funded channel from a remote side's request for one.
 +      /// Assumes chain_hash has already been checked and corresponds with what we expect!
 +      pub fn new<ES: Deref, F: Deref, L: Deref>(
 +              fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
 +              counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
 +              their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
 +              config: &UserConfig, current_chain_height: u32, logger: &L,
 +      ) -> Result<InboundV2Channel<SP>, ChannelError>
 +              where ES::Target: EntropySource,
 +                        F::Target: FeeEstimator,
 +                        L::Target: Logger,
 +      {
 +              let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
 +              let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
 +                      channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
 +              let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
 +                      channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
 +
 +              // First check the channel type is known, failing before we do anything else if we don't
 +              // support this channel type.
 +              if msg.common_fields.channel_type.is_none() {
 +                      return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
 +                              msg.common_fields.temporary_channel_id)))
 +              }
 +              let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
 +
 +              let counterparty_pubkeys = ChannelPublicKeys {
 +                      funding_pubkey: msg.common_fields.funding_pubkey,
 +                      revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
 +                      payment_point: msg.common_fields.payment_basepoint,
 +                      delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
 +                      htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
 +              };
 +
 +              let mut context = ChannelContext::new_for_inbound_channel(
 +                      fee_estimator,
 +                      entropy_source,
 +                      signer_provider,
 +                      counterparty_node_id,
 +                      their_features,
 +                      user_id,
 +                      config,
 +                      current_chain_height,
 +                      logger,
 +                      false,
 +
 +                      funding_satoshis,
 +
 +                      counterparty_pubkeys,
 +                      channel_type,
 +                      holder_selected_channel_reserve_satoshis,
 +                      counterparty_selected_channel_reserve_satoshis,
 +                      0 /* push_msat not used in dual-funding */,
 +                      msg.common_fields.clone(),
 +              )?;
 +              let channel_id = ChannelId::v2_from_revocation_basepoints(
 +                      &context.get_holder_pubkeys().revocation_basepoint,
 +                      &context.get_counterparty_pubkeys().revocation_basepoint);
 +              context.channel_id = channel_id;
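// For reference, the dual-funding spec derives the v2 channel_id as
// SHA256(lesser-revocation-basepoint || greater-revocation-basepoint) over the
// 33-byte compressed points; a hedged sketch of what the helper above computes:
//
//     let (a, b) = (ours.serialize(), theirs.serialize());
//     let (lo, hi) = if a <= b { (a, b) } else { (b, a) };
//     let channel_id = Sha256::hash(&[&lo[..], &hi[..]].concat());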
 +
 +              let chan = Self {
 +                      context,
 +                      unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
 +                      dual_funding_context: DualFundingChannelContext {
 +                              our_funding_satoshis: funding_satoshis,
 +                              their_funding_satoshis: msg.common_fields.funding_satoshis,
 +                              funding_tx_locktime: msg.locktime,
 +                              funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
 +                      }
 +              };
 +
 +              Ok(chan)
 +      }
 +
 +      /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
 +      /// should be sent back to the counterparty node.
 +      ///
 +      /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
 +      pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
 +              if self.context.is_outbound() {
 +                      debug_assert!(false, "Tried to send accept_channel2 for an outbound channel?");
 +              }
 +              if !matches!(
 +                      self.context.channel_state, ChannelState::NegotiatingFunding(flags)
 +                      if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
 +              ) {
 +                      debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
 +              }
 +              if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
 +                      debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
 +              }
 +
 +              self.generate_accept_channel_v2_message()
 +      }
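 +
 +      // A sketch of the expected call flow, assuming `ChannelManager`-style plumbing around this
 +      // channel (`peer_state` and `counterparty_node_id` are placeholders here, not local bindings):
 +      //
 +      //   let msg = chan.accept_inbound_dual_funded_channel();
 +      //   peer_state.pending_msg_events.push(MessageSendEvent::SendAcceptChannelV2 {
 +      //           node_id: counterparty_node_id, msg,
 +      //   });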
 +
 +      /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
 +      /// inbound dual-funded channel. If the intention is to accept an inbound channel, use
 +      /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
 +      ///
 +      /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
 +      fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
 +              let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
 +                      self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
 +              let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
 +                      self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
 +              let keys = self.context.get_holder_pubkeys();
 +
 +              msgs::AcceptChannelV2 {
 +                      common_fields: msgs::CommonAcceptChannelFields {
 +                              temporary_channel_id: self.context.temporary_channel_id.unwrap(),
 +                              dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
 +                              max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
 +                              htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
 +                              minimum_depth: self.context.minimum_depth.unwrap(),
 +                              to_self_delay: self.context.get_holder_selected_contest_delay(),
 +                              max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
 +                              funding_pubkey: keys.funding_pubkey,
 +                              revocation_basepoint: keys.revocation_basepoint.to_public_key(),
 +                              payment_basepoint: keys.payment_point,
 +                              delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
 +                              htlc_basepoint: keys.htlc_basepoint.to_public_key(),
 +                              first_per_commitment_point,
 +                              shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
 +                                      Some(script) => script.clone().into_inner(),
 +                                      None => Builder::new().into_script(),
 +                              }),
 +                              channel_type: Some(self.context.channel_type.clone()),
 +                      },
 +                      funding_satoshis: self.dual_funding_context.our_funding_satoshis,
 +                      second_per_commitment_point,
 +                      require_confirmed_inputs: None,
 +              }
 +      }
 +
 +      /// Allows tests to extract a [`msgs::AcceptChannelV2`] message for an inbound channel
 +      /// without accepting it.
 +      ///
 +      /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
 +      #[cfg(test)]
 +      pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
 +              self.generate_accept_channel_v2_message()
 +      }
 +}
 +
 +// Unfunded channel utilities
 +
 +fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
 +      // The default channel type (i.e., the first one we try) depends on whether the channel is
 +      // public - if it is, we just go with `only_static_remotekey` as it's the only option
 +      // available. If it's private, we first try `scid_privacy` as it provides better privacy
 +      // with no other changes, and fall back to `only_static_remotekey`.
 +      let mut ret = ChannelTypeFeatures::only_static_remote_key();
 +      if !config.channel_handshake_config.announced_channel &&
 +              config.channel_handshake_config.negotiate_scid_privacy &&
 +              their_features.supports_scid_privacy() {
 +              ret.set_scid_privacy_required();
 +      }
 +
 +      // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
 +      // set it now. If they don't understand it, we'll fall back to our default of
 +      // `only_static_remotekey`.
 +      if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
 +              their_features.supports_anchors_zero_fee_htlc_tx() {
 +              ret.set_anchors_zero_fee_htlc_tx_required();
 +      }
 +
 +      ret
 +}
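 +
 +// A minimal sketch of how this negotiates in practice, assuming an unannounced channel and a
 +// peer whose `InitFeatures` advertise `scid_privacy` (as `channelmanager::provided_init_features`,
 +// the helper used in this file's tests, does):
 +//
 +//   let mut config = UserConfig::default();
 +//   config.channel_handshake_config.announced_channel = false;
 +//   config.channel_handshake_config.negotiate_scid_privacy = true;
 +//   let their_features = channelmanager::provided_init_features(&config);
 +//   assert!(get_initial_channel_type(&config, &their_features).supports_scid_privacy());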
 +
 +const SERIALIZATION_VERSION: u8 = 4;
  const MIN_SERIALIZATION_VERSION: u8 = 3;
  
  impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
@@@ -8369,18 -7479,7 +8389,18 @@@ impl<SP: Deref> Writeable for Channel<S
                // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
                // called.
  
 -              write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
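 +              // If any inbound HTLC still carries an `InboundHTLCResolution::Pending`, we have to
 +              // write the new serialization version, as prior versions cannot represent the variant.
 +              // Otherwise, stick with `MIN_SERIALIZATION_VERSION` so that older readers stay
 +              // compatible with this channel.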
 +              let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
 +                      InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
 +                              InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
 +                              matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
 +                      },
 +                      _ => false,
 +              }) {
 +                      SERIALIZATION_VERSION
 +              } else {
 +                      MIN_SERIALIZATION_VERSION
 +              };
 +              write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
  
                // `user_id` used to be a single u64 value. In order to remain backwards compatible with
                // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
                        let mut channel_state = self.context.channel_state;
                        if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
                                channel_state.set_peer_disconnected();
 +                      } else {
 +                              debug_assert!(false, "Pre-funded/shutdown channels should not be written");
                        }
                        channel_state.to_u32().write(writer)?;
                }
                        htlc.payment_hash.write(writer)?;
                        match &htlc.state {
                                &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
 -                              &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
 +                              &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
                                        1u8.write(writer)?;
 -                                      htlc_state.write(writer)?;
 +                                      if version_to_write <= 3 {
 +                                              if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
 +                                                      pending_htlc_status.write(writer)?;
 +                                              } else {
 +                                                      panic!();
 +                                              }
 +                                      } else {
 +                                              htlc_resolution.write(writer)?;
 +                                      }
                                },
 -                              &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
 +                              &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
                                        2u8.write(writer)?;
 -                                      htlc_state.write(writer)?;
 +                                      if version_to_write <= 3 {
 +                                              if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
 +                                                      pending_htlc_status.write(writer)?;
 +                                              } else {
 +                                                      panic!();
 +                                              }
 +                                      } else {
 +                                              htlc_resolution.write(writer)?;
 +                                      }
                                },
                                &InboundHTLCState::Committed => {
                                        3u8.write(writer)?;
  
                let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
  
 +              let mut monitor_pending_update_adds = None;
 +              if !self.context.monitor_pending_update_adds.is_empty() {
 +                      monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
 +              }
 +
                write_tlv_fields!(writer, {
                        (0, self.context.announcement_sigs, option),
                        // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
                        (7, self.context.shutdown_scriptpubkey, option),
                        (8, self.context.blocked_monitor_updates, optional_vec),
                        (9, self.context.target_closing_feerate_sats_per_kw, option),
 +                      (10, monitor_pending_update_adds, option), // Added in 0.0.122
                        (11, self.context.monitor_pending_finalized_fulfills, required_vec),
                        (13, self.context.channel_creation_height, required),
                        (15, preimages, required_vec),
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
 +                      // 45 and 47 are reserved for async signing
 +                      (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
  
                Ok(())
@@@ -8802,22 -7875,8 +8822,22 @@@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> 
                                cltv_expiry: Readable::read(reader)?,
                                payment_hash: Readable::read(reader)?,
                                state: match <u8 as Readable>::read(reader)? {
 -                                      1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
 -                                      2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
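 +                                      // Prior to version 4 the resolved `PendingHTLCStatus` was
 +                                      // written directly; wrap it in the `Resolved` variant on read.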
 +                                      1 => {
 +                                              let resolution = if ver <= 3 {
 +                                                      InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
 +                                              } else {
 +                                                      Readable::read(reader)?
 +                                              };
 +                                              InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
 +                                      },
 +                                      2 => {
 +                                              let resolution = if ver <= 3 {
 +                                                      InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
 +                                              } else {
 +                                                      Readable::read(reader)?
 +                                              };
 +                                              InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
 +                                      },
                                        3 => InboundHTLCState::Committed,
                                        4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
                                        _ => return Err(DecodeError::InvalidValue),
                let channel_update_status = Readable::read(reader)?;
  
                #[cfg(any(test, fuzzing))]
 -              let mut historical_inbound_htlc_fulfills = HashSet::new();
 +              let mut historical_inbound_htlc_fulfills = new_hash_set();
                #[cfg(any(test, fuzzing))]
                {
                        let htlc_fulfills_len: u64 = Readable::read(reader)?;
  
                let mut is_batch_funding: Option<()> = None;
  
 +              let mut local_initiated_shutdown: Option<()> = None;
 +
                let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
                let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
  
                let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
 +              let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
  
                read_tlv_fields!(reader, {
                        (0, announcement_sigs, option),
                        (7, shutdown_scriptpubkey, option),
                        (8, blocked_monitor_updates, optional_vec),
                        (9, target_closing_feerate_sats_per_kw, option),
 +                      (10, monitor_pending_update_adds, option), // Added in 0.0.122
                        (11, monitor_pending_finalized_fulfills, optional_vec),
                        (13, channel_creation_height, option),
                        (15, preimages_opt, optional_vec),
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
 +                      // 45 and 47 are reserved for async signing
 +                      (49, local_initiated_shutdown, option),
                });
  
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
                                monitor_pending_forwards,
                                monitor_pending_failures,
                                monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
 +                              monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
  
                                signer_pending_commitment_update: false,
                                signer_pending_funding: false,
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
  
 +                              local_initiated_shutdown,
 +
                                blocked_monitor_updates: blocked_monitor_updates.unwrap(),
 -                      }
 +                      },
 +                      #[cfg(any(dual_funding, splicing))]
 +                      dual_funding_channel_context: None,
                })
        }
  }
@@@ -9350,18 -8398,6 +9370,18 @@@ mod tests 
        use bitcoin::address::{WitnessProgram, WitnessVersion};
        use crate::prelude::*;
  
 +      #[test]
 +      fn test_channel_state_order() {
 +              use crate::ln::channel::NegotiatingFundingFlags;
 +              use crate::ln::channel::AwaitingChannelReadyFlags;
 +              use crate::ln::channel::ChannelReadyFlags;
 +
 +              assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
 +              assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
 +              assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
 +              assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
 +      }
 +
        struct TestFeeEstimator {
                fee_est: u32
        }
                // same as the old fee.
                fee_est.fee_est = 500;
                let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
 -              assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
 +              assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
        }
  
        #[test]
  
                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
                let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
 -              accept_channel_msg.dust_limit_satoshis = 546;
 +              accept_channel_msg.common_fields.dust_limit_satoshis = 546;
                node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
                node_a_chan.context.holder_dust_limit_satoshis = 1560;
  
  
                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
                let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
 -              accept_channel_msg.dust_limit_satoshis = 546;
 +              accept_channel_msg.common_fields.dust_limit_satoshis = 546;
                node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
                node_a_chan.context.holder_dust_limit_satoshis = 1560;
  
        fn blinding_point_skimmed_fee_malformed_ser() {
                // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
                // properly.
 +              let logger = test_utils::TestLogger::new();
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
 +              let best_block = BestBlock::from_network(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
  
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
 -              let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
 -              let mut chan = Channel { context: outbound_chan.context };
 +              let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
 +                      &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
 +              ).unwrap();
 +              let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
 +                      &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
 +                      &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
 +              ).unwrap();
 +              outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
 +              let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
 +                      value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
 +              }]};
 +              let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
 +              let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
 +              let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
 +                      Ok((chan, _, _)) => chan,
 +                      Err((_, e)) => panic!("{}", e),
 +              };
  
                let dummy_htlc_source = HTLCSource::OutboundRoute {
                        path: Path {
                channel_type_features.set_zero_conf_required();
  
                let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
 -              open_channel_msg.channel_type = Some(channel_type_features);
 +              open_channel_msg.common_fields.channel_type = Some(channel_type_features);
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
                        node_b_node_id, &channelmanager::provided_channel_type_features(&config),
  
                // Set `channel_type` to `None` to force the implicit feature negotiation.
                let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
 -              open_channel_msg.channel_type = None;
 +              open_channel_msg.common_fields.channel_type = None;
  
                // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
                // `static_remote_key`, it will fail the channel.
                ).unwrap();
  
                let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
 -              open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
 +              open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
  
                let res = InboundV1Channel::<&TestKeysInterface>::new(
                        &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
                ).unwrap();
  
                let mut accept_channel_msg = channel_b.get_accept_channel_message();
 -              accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
 +              accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
  
                let res = channel_a.accept_channel(
                        &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
index 50366faad1c18ca6a68c0b0ae42037d4ebdfacac,9d807f854bd3e44fee105b21cbd109cfb2e172e6..05d4351509eb5d6442841656f83c4d0210c02151
@@@ -31,8 -31,8 +31,8 @@@ use bitcoin::secp256k1::{SecretKey,Publ
  use bitcoin::secp256k1::Secp256k1;
  use bitcoin::{secp256k1, Sequence};
  
 -use crate::blinded_path::BlindedPath;
 -use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
 +use crate::blinded_path::{BlindedPath, NodeIdLookUp};
 +use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
  use crate::chain;
  use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
  use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@@ -44,7 -44,6 +44,7 @@@ use crate::events::{Event, EventHandler
  // construct one themselves.
  use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
  use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
 +pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
  use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
  #[cfg(any(feature = "_test_utils", test))]
  use crate::ln::features::Bolt11InvoiceFeatures;
@@@ -58,10 -57,10 +58,10 @@@ use crate::ln::msgs::{ChannelMessageHan
  use crate::ln::outbound_payment;
  use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
  use crate::ln::wire::Encode;
 -use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, InvoiceBuilder};
 +use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
  use crate::offers::invoice_error::InvoiceError;
 -use crate::offers::merkle::SignError;
 -use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 +use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
 +use crate::offers::offer::{Offer, OfferBuilder};
  use crate::offers::parse::Bolt12SemanticError;
  use crate::offers::refund::{Refund, RefundBuilder};
  use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
@@@ -77,17 -76,11 +77,17 @@@ use crate::util::logger::{Level, Logger
  use crate::util::errors::APIError;
  #[cfg(not(c_bindings))]
  use {
 +      crate::offers::offer::DerivedMetadata,
        crate::routing::router::DefaultRouter,
        crate::routing::gossip::NetworkGraph,
        crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
        crate::sign::KeysManager,
  };
 +#[cfg(c_bindings)]
 +use {
 +      crate::offers::offer::OfferWithDerivedMetadataBuilder,
 +      crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
 +};
  
  use alloc::collections::{btree_map, BTreeMap};
  
@@@ -155,11 -148,6 +155,11 @@@ pub enum PendingHTLCRouting 
                /// [`Event::PaymentClaimable::onion_fields`] as
                /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
 +              /// The context of the payment included by the recipient in a blinded path, or `None` if a
 +              /// blinded path was not used.
 +              ///
 +              /// Used in part to determine the [`events::PaymentPurpose`].
 +              payment_context: Option<PaymentContext>,
                /// CLTV expiry of the received HTLC.
                ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
                /// For HTLCs received by LDK, these will ultimately bubble back up as
                /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
 +              /// Set if this HTLC is the final hop in a multi-hop blinded path.
 +              requires_blinded_error: bool,
        },
  }
  
@@@ -227,7 -213,6 +227,7 @@@ impl PendingHTLCRouting 
                match self {
                        Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
                        Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
 +                      Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
                        _ => None,
                }
        }
@@@ -303,7 -288,6 +303,7 @@@ pub(super) struct PendingAddHTLCInfo 
        // Note that this may be an outbound SCID alias for the associated channel.
        prev_short_channel_id: u64,
        prev_htlc_id: u64,
 +      prev_channel_id: ChannelId,
        prev_funding_outpoint: OutPoint,
        prev_user_channel_id: u128,
  }
@@@ -344,7 -328,6 +344,7 @@@ pub(crate) struct HTLCPreviousHopData 
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
        blinded_failure: Option<BlindedFailure>,
 +      channel_id: ChannelId,
  
        // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
        // channel with a preimage provided by the forward channel.
@@@ -357,11 -340,6 +357,11 @@@ enum OnionPayload 
                /// This is only here for backwards-compatibility in serialization, in the future it can be
                /// removed, breaking clients running 0.0.106 and earlier.
                _legacy_hop_data: Option<msgs::FinalOnionHopData>,
 +              /// The context of the payment included by the recipient in a blinded path, or `None` if a
 +              /// blinded path was not used.
 +              ///
 +              /// Used in part to determine the [`events::PaymentPurpose`].
 +              payment_context: Option<PaymentContext>,
        },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
@@@ -390,7 -368,7 +390,7 @@@ struct ClaimableHTLC 
  impl From<&ClaimableHTLC> for events::ClaimedHTLC {
        fn from(val: &ClaimableHTLC) -> Self {
                events::ClaimedHTLC {
 -                      channel_id: val.prev_hop.outpoint.to_channel_id(),
 +                      channel_id: val.prev_hop.channel_id,
                        user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
                        cltv_expiry: val.cltv_expiry,
                        value_msat: val.value,
@@@ -729,7 -707,7 +729,7 @@@ enum BackgroundEvent 
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
 -      ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
 +      ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
        /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
        /// channel to continue normal operation.
        ///
        MonitorUpdateRegeneratedOnStartup {
                counterparty_node_id: PublicKey,
                funding_txo: OutPoint,
 +              channel_id: ChannelId,
                update: ChannelMonitorUpdate
        },
        /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
@@@ -772,7 -749,7 +772,7 @@@ pub(crate) enum MonitorUpdateCompletion
        /// outbound edge.
        EmitEventAndFreeOtherChannel {
                event: events::Event,
 -              downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
 +              downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
        },
        /// Indicates we should immediately resume the operation of another channel, unless there is
        /// some other reason why the channel is blocked. In practice this simply means immediately
                downstream_counterparty_node_id: PublicKey,
                downstream_funding_outpoint: OutPoint,
                blocking_action: RAAMonitorUpdateBlockingAction,
 +              downstream_channel_id: ChannelId,
        },
  }
  
@@@ -802,9 -778,6 +802,9 @@@ impl_writeable_tlv_based_enum_upgradabl
                (0, downstream_counterparty_node_id, required),
                (2, downstream_funding_outpoint, required),
                (4, blocking_action, required),
 +              // Note that by the time we get past the required read above, downstream_funding_outpoint will be
 +              // filled in, so we can safely unwrap it here.
 +              (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
        },
        (2, EmitEventAndFreeOtherChannel) => {
                (0, event, upgradable_required),
@@@ -822,16 -795,12 +822,16 @@@ pub(crate) enum EventCompletionAction 
        ReleaseRAAChannelMonitorUpdate {
                counterparty_node_id: PublicKey,
                channel_funding_outpoint: OutPoint,
 +              channel_id: ChannelId,
        },
  }
  impl_writeable_tlv_based_enum!(EventCompletionAction,
        (0, ReleaseRAAChannelMonitorUpdate) => {
                (0, channel_funding_outpoint, required),
                (2, counterparty_node_id, required),
 +              // Note that by the time we get past the required read above, channel_funding_outpoint will be
 +              // filled in, so we can safely unwrap it here.
 +              (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
        };
  );
  
@@@ -853,7 -822,7 +853,7 @@@ pub(crate) enum RAAMonitorUpdateBlockin
  impl RAAMonitorUpdateBlockingAction {
        fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
                Self::ForwardedPaymentInboundClaim {
 -                      channel_id: prev_hop.outpoint.to_channel_id(),
 +                      channel_id: prev_hop.channel_id,
                        htlc_id: prev_hop.htlc_id,
                }
        }
@@@ -912,7 -881,7 +912,7 @@@ pub(super) struct PeerState<SP: Deref> 
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`]).
 -      is_connected: bool,
 +      pub is_connected: bool,
  }
  
  impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
 -              self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
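 +              // Unfunded inbound channels are cheap for a counterparty to open and are dropped on
 +              // disconnect anyway, so they alone don't keep this peer's state alive. Unfunded
 +              // channels *we* initiated, however, must be kept until they can be funded.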
 +              !self.channel_by_id.iter().any(|(_, phase)|
 +                      match phase {
 +                              ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
 +                              ChannelPhase::UnfundedInboundV1(_) => false,
 +                              #[cfg(any(dual_funding, splicing))]
 +                              ChannelPhase::UnfundedOutboundV2(_) => true,
 +                              #[cfg(any(dual_funding, splicing))]
 +                              ChannelPhase::UnfundedInboundV2(_) => false,
 +                      }
 +              )
                        && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
@@@ -1004,7 -964,6 +1004,7 @@@ pub type SimpleArcChannelManager<M, T, 
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
 +              Arc<KeysManager>,
                Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
                ProbabilisticScoringFeeParameters,
                ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
@@@ -1035,7 -994,6 +1035,7 @@@ pub type SimpleRefChannelManager<'a, 'b
                &'e DefaultRouter<
                        &'f NetworkGraph<&'g L>,
                        &'g L,
 +                      &'c KeysManager,
                        &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
                        ProbabilisticScoringFeeParameters,
                        ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
        fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
  }
  
 -/// Manager which keeps track of a number of channels and sends messages to the appropriate
 -/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
 +/// A lightning node's channel state machine and payment management logic, which facilitates
 +/// sending, forwarding, and receiving payments through lightning channels.
 +///
 +/// [`ChannelManager`] is parameterized by a number of components to achieve this.
 +/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
 +///   channel
 +/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
 +///   closing channels
 +/// - [`EntropySource`] for providing random data needed for cryptographic operations
 +/// - [`NodeSigner`] for cryptographic operations scoped to the node
 +/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
 +/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
 +///   timely manner
 +/// - [`Router`] for finding payment paths when initiating and retrying payments
 +/// - [`Logger`] for logging operational information of varying degrees
 +///
 +/// Additionally, it implements the following traits:
 +/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
 +/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
 +/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
 +/// - [`EventsProvider`] to generate user-actionable [`Event`]s
 +/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
 +///
 +/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
 +/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
 +///
 +/// # `ChannelManager` vs `ChannelMonitor`
 +///
 +/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
 +/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
 +/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
 +/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
 +/// [`chain::Watch`] of them.
 +///
 +/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
 +/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
 +/// for any pertinent on-chain activity, enforcing claims as needed.
 +///
 +/// This division of off-chain management and on-chain enforcement allows for interesting node
 +/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
 +/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
 +///
 +/// # Initialization
 +///
 +/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
 +/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
 +/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
 +/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
 +/// detailed in the [`ChannelManagerReadArgs`] documentation.
 +///
 +/// ```
 +/// use bitcoin::BlockHash;
 +/// use bitcoin::network::constants::Network;
 +/// use lightning::chain::BestBlock;
 +/// # use lightning::chain::channelmonitor::ChannelMonitor;
 +/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
 +/// # use lightning::routing::gossip::NetworkGraph;
 +/// use lightning::util::config::UserConfig;
 +/// use lightning::util::ser::ReadableArgs;
 +///
 +/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
 +/// # fn example<
 +/// #     'a,
 +/// #     L: lightning::util::logger::Logger,
 +/// #     ES: lightning::sign::EntropySource,
 +/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
 +/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
 +/// #     SP: Sized,
 +/// #     R: lightning::io::Read,
 +/// # >(
 +/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
 +/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
 +/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
 +/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
 +/// #     logger: &L,
 +/// #     entropy_source: &ES,
 +/// #     node_signer: &dyn lightning::sign::NodeSigner,
 +/// #     signer_provider: &lightning::sign::DynSignerProvider,
 +/// #     best_block: lightning::chain::BestBlock,
 +/// #     current_timestamp: u32,
 +/// #     mut reader: R,
 +/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
 +/// // Fresh start with no channels
 +/// let params = ChainParameters {
 +///     network: Network::Bitcoin,
 +///     best_block,
 +/// };
 +/// let default_config = UserConfig::default();
 +/// let channel_manager = ChannelManager::new(
 +///     fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
 +///     signer_provider, default_config, params, current_timestamp
 +/// );
 +///
 +/// // Restart from deserialized data
 +/// let mut channel_monitors = read_channel_monitors();
 +/// let args = ChannelManagerReadArgs::new(
 +///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
 +///     router, logger, default_config, channel_monitors.iter_mut().collect()
 +/// );
 +/// let (block_hash, channel_manager) =
 +///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
 +///
 +/// // Update the ChannelManager and ChannelMonitors with the latest chain data
 +/// // ...
 +///
 +/// // Move the monitors to the ChannelManager's chain::Watch parameter
 +/// for monitor in channel_monitors {
 +///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 +/// }
 +/// # Ok(())
 +/// # }
 +/// ```
 +///
 +/// # Operation
 +///
 +/// The following is required for [`ChannelManager`] to function properly:
 +/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
 +///   called by [`PeerManager::read_event`] when processing network I/O)
 +/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
 +///   (typically initiated when [`PeerManager::process_events`] is called)
 +/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
 +///   as documented by those traits
 +/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
 +///   every minute
 +/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
 +///   [`Persister`] such as a [`KVStore`] implementation
 +/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
 +///
 +/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
 +/// when the last two requirements need to be checked.
 +///
 +/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
 +/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
 +/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
 +/// crate. For languages other than Rust, the availability of similar utilities may vary.
 +///
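 +/// A minimal sketch of driving the timer and persistence requirements by hand, assuming a
 +/// user-provided `persist_manager` helper (not part of LDK); in practice the
 +/// [`lightning-background-processor`] crate handles this loop for you:
 +///
 +/// ```
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # fn persist_manager<CM: AChannelManager>(_cm: &CM) {
 +/// #     // Write the manager out with a `Persister`/`KVStore` implementation.
 +/// # }
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// // Called roughly once a minute, from a timer.
 +/// channel_manager.timer_tick_occurred();
 +///
 +/// // After handling events and messages, persist if anything changed.
 +/// if channel_manager.get_and_clear_needs_persistence() {
 +///     persist_manager(channel_manager);
 +/// }
 +/// # }
 +/// ```
 +///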
 +/// # Channels
 +///
 +/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
 +/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
 +/// currently open channels.
 +///
 +/// ```
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// #
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let channels = channel_manager.list_usable_channels();
 +/// for details in channels {
 +///     println!("{:?}", details);
 +/// }
 +/// # }
 +/// ```
 +///
 +/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
 +/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
 +/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
 +/// by [`ChannelManager`].
 +///
 +/// ## Opening Channels
 +///
 +/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
 +/// opening an outbound channel, which requires self-funding when handling
 +/// [`Event::FundingGenerationReady`].
 +///
 +/// ```
 +/// # use bitcoin::{ScriptBuf, Transaction};
 +/// # use bitcoin::secp256k1::PublicKey;
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::events::{Event, EventsProvider};
 +/// #
 +/// # trait Wallet {
 +/// #     fn create_funding_transaction(
 +/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
 +/// #     ) -> Transaction;
 +/// # }
 +/// #
 +/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let value_sats = 1_000_000;
 +/// let push_msats = 10_000_000;
 +/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
 +///     Ok(channel_id) => println!("Opening channel {}", channel_id),
 +///     Err(e) => println!("Error opening channel: {:?}", e),
 +/// }
 +///
 +/// // On the event processing thread once the peer has responded
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::FundingGenerationReady {
 +///         temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
 +///         user_channel_id, ..
 +///     } => {
 +///         assert_eq!(user_channel_id, 42);
 +///         let funding_transaction = wallet.create_funding_transaction(
 +///             channel_value_satoshis, output_script
 +///         );
 +///         match channel_manager.funding_transaction_generated(
 +///             &temporary_channel_id, &counterparty_node_id, funding_transaction
 +///         ) {
 +///             Ok(()) => println!("Funding channel {}", temporary_channel_id),
 +///             Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
 +///         }
 +///     },
 +///     Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
 +///         assert_eq!(user_channel_id, 42);
 +///         println!(
 +///             "Channel {} now {} pending (funding transaction has been broadcasted)", channel_id,
 +///             "Channel {} (formerly {}) is now pending (funding transaction has been broadcast)",
 +///             channel_id, former_temporary_channel_id.unwrap()
 +///     },
 +///     Event::ChannelReady { channel_id, user_channel_id, .. } => {
 +///         assert_eq!(user_channel_id, 42);
 +///         println!("Channel {} ready", channel_id);
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// ## Accepting Channels
 +///
 +/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
 +/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
 +/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
 +///
 +/// ```
 +/// # use bitcoin::secp256k1::PublicKey;
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::events::{Event, EventsProvider};
 +/// #
 +/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
 +/// #     // ...
 +/// #     unimplemented!()
 +/// # }
 +/// #
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
 +///         if !is_trusted(counterparty_node_id) {
 +///             match channel_manager.force_close_without_broadcasting_txn(
 +///                 &temporary_channel_id, &counterparty_node_id
 +///             ) {
 +///                 Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
 +///                 Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
 +///             }
 +///             return;
 +///         }
 +///
 +///         let user_channel_id = 43;
 +///         match channel_manager.accept_inbound_channel(
 +///             &temporary_channel_id, &counterparty_node_id, user_channel_id
 +///         ) {
 +///             Ok(()) => println!("Accepting channel {}", temporary_channel_id),
 +///             Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
 +///         }
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// ## Closing Channels
 +///
 +/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
 +/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
 +/// lower fees and immediate access to funds. However, the latter may be necessary if the
 +/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
 +/// once the channel has been closed successfully.
 +///
 +/// ```
 +/// # use bitcoin::secp256k1::PublicKey;
 +/// # use lightning::ln::ChannelId;
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::events::{Event, EventsProvider};
 +/// #
 +/// # fn example<T: AChannelManager>(
 +/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
 +/// # ) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
 +///     Ok(()) => println!("Closing channel {}", channel_id),
 +///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
 +/// }
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::ChannelClosed { channel_id, user_channel_id, ..  } => {
 +///         assert_eq!(user_channel_id, 42);
 +///         println!("Channel {} closed", channel_id);
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// # Payments
 +///
 +/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
 +/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
 +/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
 +/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
 +/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry failed
 +/// HTLCs.
 +///
 +/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
 +/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
 +/// for a payment will be retried according to the payment's [`Retry`] strategy or until
 +/// [`abandon_payment`] is called.
 +///
 +/// ## BOLT 11 Invoices
 +///
 +/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. Specifically, use the
 +/// functions in its `utils` module for constructing invoices that are compatible with
 +/// [`ChannelManager`]. These functions serve as a convenience for building invoices with the
 +/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
 +/// own [`PaymentHash`], use [`create_inbound_payment_for_hash`] or the corresponding functions in
 +/// the [`lightning-invoice`] `utils` module.
 +///
 +/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
 +/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
 +/// an [`Event::PaymentClaimed`].
 +///
 +/// ```
 +/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// #
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// // Or use utils::create_invoice_from_channelmanager
 +/// let known_payment_hash = match channel_manager.create_inbound_payment(
 +///     Some(10_000_000), 3600, None
 +/// ) {
 +///     Ok((payment_hash, _payment_secret)) => {
 +///         println!("Creating inbound payment {}", payment_hash);
 +///         payment_hash
 +///     },
 +///     Err(()) => panic!("Error creating inbound payment"),
 +/// };
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
 +///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
 +///             assert_eq!(payment_hash, known_payment_hash);
 +///             println!("Claiming payment {}", payment_hash);
 +///             channel_manager.claim_funds(payment_preimage);
 +///         },
 +///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
 +///             println!("Unknown payment hash: {}", payment_hash);
 +///         },
 +///         PaymentPurpose::SpontaneousPayment(payment_preimage) => {
 +///             assert_ne!(payment_hash, known_payment_hash);
 +///             println!("Claiming spontaneous payment {}", payment_hash);
 +///             channel_manager.claim_funds(payment_preimage);
 +///         },
 +///         // ...
 +/// #         _ => {},
 +///     },
 +///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
 +///         assert_eq!(payment_hash, known_payment_hash);
 +///         println!("Claimed {} msats", amount_msat);
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// For paying an invoice, [`lightning-invoice`] provides a `payment` module with convenience
 +/// functions for use with [`send_payment`].
 +///
 +/// ```
 +/// # use lightning::events::{Event, EventsProvider};
 +/// # use lightning::ln::PaymentHash;
 +/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
 +/// # use lightning::routing::router::RouteParameters;
 +/// #
 +/// # fn example<T: AChannelManager>(
 +/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
 +/// #     route_params: RouteParameters, retry: Retry
 +/// # ) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// // let (payment_hash, recipient_onion, route_params) =
 +/// //     payment::payment_parameters_from_invoice(&invoice);
 +/// let payment_id = PaymentId([42; 32]);
 +/// match channel_manager.send_payment(
 +///     payment_hash, recipient_onion, payment_id, route_params, retry
 +/// ) {
 +///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
 +///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
 +/// }
 +///
 +/// let expected_payment_id = payment_id;
 +/// let expected_payment_hash = payment_hash;
 +/// assert!(
 +///     channel_manager.list_recent_payments().iter().find(|details| matches!(
 +///         details,
 +///         RecentPaymentDetails::Pending {
 +///             payment_id: expected_payment_id,
 +///             payment_hash: expected_payment_hash,
 +///             ..
 +///         }
 +///     )).is_some()
 +/// );
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
 +///     Event::PaymentFailed { payment_hash, .. } => println!("Failed paying {}", payment_hash),
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// ## BOLT 12 Offers
 +///
 +/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
 +/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
 +/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
 +/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
 +/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
 +/// stateless just as BOLT 11 invoices are.
 +///
 +/// ```
 +/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::offers::parse::Bolt12SemanticError;
 +/// #
 +/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let offer = channel_manager
 +///     .create_offer_builder("coffee".to_string())?
 +/// # ;
 +/// # // Needed for compiling for c_bindings
 +/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
 +/// # let offer = builder
 +///     .amount_msats(10_000_000)
 +///     .build()?;
 +/// let bech32_offer = offer.to_string();
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
 +///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
 +///             println!("Claiming payment {}", payment_hash);
 +///             channel_manager.claim_funds(payment_preimage);
 +///         },
 +///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
 +///             println!("Unknown payment hash: {}", payment_hash);
 +///         },
 +///         // ...
 +/// #         _ => {},
 +///     },
 +///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
 +///         println!("Claimed {} msats", amount_msat);
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # Ok(())
 +/// # }
 +/// ```
 +///
 +/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
 +/// and pays the [`Bolt12Invoice`] response. In addition to success and failure events,
 +/// [`ChannelManager`] may also generate an [`Event::InvoiceRequestFailed`].
 +///
 +/// ```
 +/// # use lightning::events::{Event, EventsProvider};
 +/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
 +/// # use lightning::offers::offer::Offer;
 +/// #
 +/// # fn example<T: AChannelManager>(
 +/// #     channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
 +/// #     payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
 +/// # ) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let payment_id = PaymentId([42; 32]);
 +/// match channel_manager.pay_for_offer(
 +///     offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
 +/// ) {
 +///     Ok(()) => println!("Requesting invoice for offer"),
 +///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
 +/// }
 +///
 +/// // First the payment will be waiting on an invoice
 +/// let expected_payment_id = payment_id;
 +/// assert!(
 +///     channel_manager.list_recent_payments().iter().find(|details| matches!(
 +///         details,
 +///         RecentPaymentDetails::AwaitingInvoice { payment_id } if *payment_id == expected_payment_id
 +///     )).is_some()
 +/// );
 +///
 +/// // Once the invoice is received, a payment will be sent
 +/// assert!(
 +///     channel_manager.list_recent_payments().iter().find(|details| matches!(
 +///         details,
 +///         RecentPaymentDetails::Pending { payment_id, .. } if *payment_id == expected_payment_id
 +///     )).is_some()
 +/// );
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
 +///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
 +///     Event::InvoiceRequestFailed { payment_id, .. } => println!("Failed requesting invoice for {}", payment_id),
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// ## BOLT 12 Refunds
 +///
 +/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
 +/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
 +/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
 +/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
 +///
 +/// ```
 +/// # use core::time::Duration;
 +/// # use lightning::events::{Event, EventsProvider};
 +/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
 +/// # use lightning::offers::parse::Bolt12SemanticError;
 +/// #
 +/// # fn example<T: AChannelManager>(
 +/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
 +/// #     max_total_routing_fee_msat: Option<u64>
 +/// # ) -> Result<(), Bolt12SemanticError> {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let payment_id = PaymentId([42; 32]);
 +/// let refund = channel_manager
 +///     .create_refund_builder(
 +///         "coffee".to_string(), amount_msats, absolute_expiry, payment_id, retry,
 +///         max_total_routing_fee_msat
 +///     )?
 +/// # ;
 +/// # // Needed for compiling for c_bindings
 +/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
 +/// # let refund = builder
 +///     .payer_note("refund for order 1234".to_string())
 +///     .build()?;
 +/// let bech32_refund = refund.to_string();
 +///
 +/// // First the payment will be waiting on an invoice
 +/// let expected_payment_id = payment_id;
 +/// assert!(
 +///     channel_manager.list_recent_payments().iter().find(|details| matches!(
 +///         details,
 +///         RecentPaymentDetails::AwaitingInvoice { payment_id } if *payment_id == expected_payment_id
 +///     )).is_some()
 +/// );
 +///
 +/// // Once the invoice is received, a payment will be sent
 +/// assert!(
 +///     channel_manager.list_recent_payments().iter().find(|details| matches!(
 +///         details,
 +///         RecentPaymentDetails::Pending { payment_id, .. } if *payment_id == expected_payment_id
 +///     )).is_some()
 +/// );
 +///
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
 +///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # Ok(())
 +/// # }
 +/// ```
 +///
 +/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
 +/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
 +///
 +/// ```
 +/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::offers::refund::Refund;
 +/// #
 +/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
 +///     Ok(invoice) => {
 +///         let payment_hash = invoice.payment_hash();
 +///         println!("Requesting refund payment {}", payment_hash);
 +///         payment_hash
 +///     },
 +///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
 +/// };
  ///
 -/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
 -/// to individual Channels.
 +/// // On the event processing thread
 +/// channel_manager.process_pending_events(&|event| match event {
 +///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
 +///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
 +///             assert_eq!(payment_hash, known_payment_hash);
 +///             println!("Claiming payment {}", payment_hash);
 +///             channel_manager.claim_funds(payment_preimage);
 +///         },
 +///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
 +///             println!("Unknown payment hash: {}", payment_hash);
 +///         },
 +///         // ...
 +/// #         _ => {},
 +///     },
 +///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
 +///         assert_eq!(payment_hash, known_payment_hash);
 +///         println!("Claimed {} msats", amount_msat);
 +///     },
 +///     // ...
 +/// #     _ => {},
 +/// });
 +/// # }
 +/// ```
 +///
 +/// # Persistence
  ///
  /// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
  /// all peers during write/read (though does not modify this instance, only the instance being
  /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
  /// See [`chain::Listen`] and [`chain::Confirm`] for more details.
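 +///
 +/// For instance, the serialized state can be captured via [`Writeable::encode`] and handed to
 +/// your storage layer (a minimal sketch; where and how the bytes are persisted is up to you):
 +///
 +/// ```
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # use lightning::util::ser::Writeable;
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// let encoded: Vec<u8> = channel_manager.encode();
 +/// println!("Serialized {} bytes of channel state", encoded.len());
 +/// // Persist `encoded` with your chosen storage, e.g. a `KVStore` implementation.
 +/// # }
 +/// ```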
  ///
 +/// # `ChannelUpdate` Messages
 +///
  /// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
  /// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
  /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
  /// offline for a full minute. In order to track this, you must call
  /// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
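 +///
 +/// For instance (a minimal sketch, assuming you drive the timer from your own background task;
 +/// [`lightning-background-processor`] can otherwise handle this for you):
 +///
 +/// ```
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # fn example<T: AChannelManager>(channel_manager: T) {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// // Call roughly once per minute from a timer or background task.
 +/// channel_manager.timer_tick_occurred();
 +/// # }
 +/// ```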
  ///
 +/// # DoS Mitigation
 +///
  /// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
  /// inbound channels without confirmed funding transactions. This may result in nodes which we do
  /// not have a channel with being unable to connect to us or open new channels with us if we have
  /// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
  /// never limited. Please ensure you limit the count of such channels yourself.
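 +///
 +/// One way to enforce such a limit yourself (a minimal sketch; `MAX_CHANNELS` is an illustrative
 +/// policy for this example, not an LDK constant):
 +///
 +/// ```
 +/// # use lightning::ln::channelmanager::AChannelManager;
 +/// # fn example<T: AChannelManager>(channel_manager: T) -> bool {
 +/// # let channel_manager = channel_manager.get_cm();
 +/// const MAX_CHANNELS: usize = 100;
 +/// // Check before calling `create_channel` or connecting to additional peers.
 +/// channel_manager.list_channels().len() < MAX_CHANNELS
 +/// # }
 +/// ```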
  ///
 +/// # Type Aliases
 +///
  /// Rather than using a plain `ChannelManager`, it is preferable to use either a
  /// [`SimpleArcChannelManager`] or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
  /// essentially you should default to using a [`SimpleRefChannelManager`], and use a
  /// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
  /// you're using lightning-net-tokio.
  ///
 +/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
 +/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
 +/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
 +/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
 +/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
 +/// [`timer_tick_occurred`]: Self::timer_tick_occurred
 +/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
 +/// [`Persister`]: crate::util::persist::Persister
 +/// [`KVStore`]: crate::util::persist::KVStore
 +/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
 +/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
 +/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
 +/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
 +/// [`list_channels`]: Self::list_channels
 +/// [`list_usable_channels`]: Self::list_usable_channels
 +/// [`create_channel`]: Self::create_channel
 +/// [`close_channel`]: Self::close_channel
 +/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
 +/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
 +/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
 +/// [`list_recent_payments`]: Self::list_recent_payments
 +/// [`abandon_payment`]: Self::abandon_payment
 +/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
 +/// [`create_inbound_payment`]: Self::create_inbound_payment
 +/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
 +/// [`claim_funds`]: Self::claim_funds
 +/// [`send_payment`]: Self::send_payment
 +/// [`offers`]: crate::offers
 +/// [`create_offer_builder`]: Self::create_offer_builder
 +/// [`pay_for_offer`]: Self::pay_for_offer
 +/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
 +/// [`create_refund_builder`]: Self::create_refund_builder
 +/// [`request_refund_payment`]: Self::request_refund_payment
  /// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
  /// [`funding_created`]: msgs::FundingCreated
  /// [`funding_transaction_generated`]: Self::funding_transaction_generated
  /// [`BlockHash`]: bitcoin::hash_types::BlockHash
  /// [`update_channel`]: chain::Watch::update_channel
  /// [`ChannelUpdate`]: msgs::ChannelUpdate
 -/// [`timer_tick_occurred`]: Self::timer_tick_occurred
  /// [`read`]: ReadableArgs::read
  //
  // Lock order:
  //  |   |
  //  |   |__`pending_intercepted_htlcs`
  //  |
 +//  |__`decode_update_add_htlcs`
 +//  |
  //  |__`per_peer_state`
  //      |
  //      |__`pending_inbound_payments`
@@@ -1937,18 -1238,6 +1937,18 @@@ wher
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
  
 +      /// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
 +      ///
 +      /// Note that, because we may have an SCID Alias as the key, we can have two entries per channel,
 +      /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
 +      /// and via the classic SCID.
 +      ///
 +      /// Note that no consistency guarantees are made about the existence of a channel with the
 +      /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
 +      ///
 +      /// See `ChannelManager` struct-level documentation for lock order requirements.
 +      decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
 +
        /// The sets of payments which are claimable or currently being claimed. See
        /// [`ClaimablePayments`]' individual field docs for more info.
        ///
  
        pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
  
 +      /// Tracks the message events that are to be broadcast when we are connected to some peer.
 +      pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
 +
        entropy_source: ES,
        node_signer: NS,
        signer_provider: SP,
@@@ -2339,6 -1625,9 +2339,6 @@@ pub struct ChannelDetails 
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
 -      ///
 -      /// Note that, if this has been set, `channel_id` will be equivalent to
 -      /// `funding_txo.unwrap().to_channel_id()`.
        pub funding_txo: Option<OutPoint>,
        /// The features which this channel operates with. See individual features for more info.
        ///
        ///
        /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
        pub config: Option<ChannelConfig>,
 +      /// Pending inbound HTLCs.
 +      ///
 +      /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
 +      pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
 +      /// Pending outbound HTLCs.
 +      ///
 +      /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
 +      pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
  }
  
  impl ChannelDetails {
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
                        channel_shutdown_state: Some(context.shutdown_state()),
 +                      pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
 +                      pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
                }
        }
  }
@@@ -2686,7 -1965,7 +2686,7 @@@ macro_rules! handle_error 
                match $internal {
                        Ok(msg) => Ok(msg),
                        Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
 -                              let mut msg_events = Vec::with_capacity(2);
 +                              let mut msg_event = None;
  
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
                                        let counterparty_node_id = shutdown_res.counterparty_node_id;
  
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
 -                                              msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                              let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
 +                                              pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
  
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
 -                                      msg_events.push(events::MessageSendEvent::HandleError {
 +                                      msg_event = Some(events::MessageSendEvent::HandleError {
                                                node_id: $counterparty_node_id,
                                                action: err.action.clone()
                                        });
                                }
  
 -                              if !msg_events.is_empty() {
 +                              if let Some(msg_event) = msg_event {
                                        let per_peer_state = $self.per_peer_state.read().unwrap();
                                        if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
                                                let mut peer_state = peer_state_mutex.lock().unwrap();
 -                                              peer_state.pending_msg_events.append(&mut msg_events);
 +                                              peer_state.pending_msg_events.push(msg_event);
                                        }
                                }
  
@@@ -2791,14 -2069,6 +2791,14 @@@ macro_rules! convert_chan_phase_err 
                        ChannelPhase::UnfundedInboundV1(channel) => {
                                convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedOutboundV2(channel) => {
 +                              convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
 +                      },
 +                      #[cfg(any(dual_funding, splicing))]
 +                      ChannelPhase::UnfundedInboundV2(channel) => {
 +                              convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
 +                      },
                }
        };
  }
@@@ -2874,7 -2144,6 +2874,7 @@@ macro_rules! emit_channel_pending_even
                                counterparty_node_id: $channel.context.get_counterparty_node_id(),
                                user_channel_id: $channel.context.get_user_id(),
                                funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
 +                              channel_type: Some($channel.context.get_channel_type().clone()),
                        }, None));
                        $channel.context.set_channel_pending_event_emitted();
                }
@@@ -2901,7 -2170,7 +2901,7 @@@ macro_rules! handle_monitor_update_comp
                let logger = WithChannelContext::from(&$self.logger, &$chan.context);
                let mut updates = $chan.monitor_updating_restored(&&logger,
                        &$self.node_signer, $self.chain_hash, &$self.default_configuration,
 -                      $self.best_block.read().unwrap().height());
 +                      $self.best_block.read().unwrap().height);
                let counterparty_node_id = $chan.context.get_counterparty_node_id();
                let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
                        // We only send a channel_update in the case where we are just now sending a
                let update_actions = $peer_state.monitor_update_blocked_actions
                        .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
  
 -              let htlc_forwards = $self.handle_channel_resumption(
 +              let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
                        &mut $peer_state.pending_msg_events, $chan, updates.raa,
 -                      updates.commitment_update, updates.order, updates.accepted_htlcs,
 +                      updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
                        updates.funding_broadcastable, updates.channel_ready,
                        updates.announcement_sigs);
                if let Some(upd) = channel_update {
                if let Some(forwards) = htlc_forwards {
                        $self.forward_htlcs(&mut [forwards][..]);
                }
 +              if let Some(decode) = decode_update_add_htlcs {
 +                      $self.push_decode_update_add_htlcs(decode);
 +              }
                $self.finalize_claims(updates.finalized_claimed_htlcs);
                for failure in updates.failed_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
@@@ -3158,15 -2424,14 +3158,15 @@@ wher
  
                        best_block: RwLock::new(params.best_block),
  
 -                      outbound_scid_aliases: Mutex::new(HashSet::new()),
 -                      pending_inbound_payments: Mutex::new(HashMap::new()),
 +                      outbound_scid_aliases: Mutex::new(new_hash_set()),
 +                      pending_inbound_payments: Mutex::new(new_hash_map()),
                        pending_outbound_payments: OutboundPayments::new(),
 -                      forward_htlcs: Mutex::new(HashMap::new()),
 -                      claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
 -                      pending_intercepted_htlcs: Mutex::new(HashMap::new()),
 -                      outpoint_to_peer: Mutex::new(HashMap::new()),
 -                      short_to_chan_info: FairRwLock::new(HashMap::new()),
 +                      forward_htlcs: Mutex::new(new_hash_map()),
 +                      decode_update_add_htlcs: Mutex::new(new_hash_map()),
 +                      claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
 +                      pending_intercepted_htlcs: Mutex::new(new_hash_map()),
 +                      outpoint_to_peer: Mutex::new(new_hash_map()),
 +                      short_to_chan_info: FairRwLock::new(new_hash_map()),
  
                        our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
                        secp_ctx,
  
                        highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
  
 -                      per_peer_state: FairRwLock::new(HashMap::new()),
 +                      per_peer_state: FairRwLock::new(new_hash_map()),
  
                        pending_events: Mutex::new(VecDeque::new()),
                        pending_events_processor: AtomicBool::new(false),
                        funding_batch_states: Mutex::new(BTreeMap::new()),
  
                        pending_offers_messages: Mutex::new(Vec::new()),
 +                      pending_broadcast_messages: Mutex::new(Vec::new()),
  
                        entropy_source,
                        node_signer,
        }
  
        fn create_and_insert_outbound_scid_alias(&self) -> u64 {
 -              let height = self.best_block.read().unwrap().height();
 +              let height = self.best_block.read().unwrap().height;
                let mut outbound_scid_alias = 0;
                let mut i = 0;
                loop {
                        let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
                        match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
                                their_features, channel_value_satoshis, push_msat, user_channel_id, config,
 -                              self.best_block.read().unwrap().height(), outbound_scid_alias, temporary_channel_id)
 +                              self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id)
                        {
                                Ok(res) => res,
                                Err(e) => {
                // the same channel.
                let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
                {
 -                      let best_block_height = self.best_block.read().unwrap().height();
 +                      let best_block_height = self.best_block.read().unwrap().height;
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                // the same channel.
                let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
                {
 -                      let best_block_height = self.best_block.read().unwrap().height();
 +                      let best_block_height = self.best_block.read().unwrap().height;
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
  
        /// Gets the list of channels we have with a given counterparty, in random order.
        pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let per_peer_state = self.per_peer_state.read().unwrap();
  
                if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
 -              if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 +              if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
 +                                      // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
 +                                      #[cfg(any(dual_funding, splicing))]
 +                                      ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
 +                                              self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
 +                                              // Unfunded channel has no update
 +                                              (None, chan_phase.context().get_counterparty_node_id())
 +                                      },
                                }
                        } else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
                                log_error!(logger, "Force-closing channel {}", &channel_id);
                        }
                };
                if let Some(update) = update_opt {
 -                      // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
 -                      // not try to broadcast it via whatever peer we have.
 -                      let per_peer_state = self.per_peer_state.read().unwrap();
 -                      let a_peer_state_opt = per_peer_state.get(peer_node_id)
 -                              .ok_or(per_peer_state.values().next());
 -                      if let Ok(a_peer_state_mutex) = a_peer_state_opt {
 -                              let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
 -                              a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                      msg: update
 -                              });
 -                      }
 +                      // If we have a `ChannelUpdate` to broadcast, we cache it and broadcast it later.
 +                      let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                      pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                              msg: update
 +                      });
                }
  
                Ok(counterparty_node_id)
        /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
        /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
        ///
 -      /// You can always get the latest local transaction(s) to broadcast from
 -      /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
 +      /// You can always broadcast the latest local transaction(s) via
 +      /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
        pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
                }
        }
  
 +      fn can_forward_htlc_to_outgoing_channel(
 +              &self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
 +      ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
 +              if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 +                      // Note that the behavior here should be identical to the above block - we
 +                      // should NOT reveal the existence or non-existence of a private channel if
 +                      // we don't allow forwards outbound over them.
 +                      return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
 +              }
 +              if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
 +                      // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
 +                      // "refuse to forward unless the SCID alias was used", so we pretend
 +                      // we don't have the channel here.
 +                      return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
 +              }
 +
 +              // Note that we could technically not return an error yet here and just hope
 +              // that the connection is reestablished or monitor updated by the time we get
 +              // around to doing the actual forward, but better to fail early if we can and
 +              // hopefully an attacker trying to path-trace payments cannot make this occur
 +              // on a small/per-node/per-channel scale.
 +              if !chan.context.is_live() { // channel_disabled
 +                      // If the channel_update we're going to return is disabled (i.e. the
 +                      // peer has been disabled for some time), return `channel_disabled`,
 +                      // otherwise return `temporary_channel_failure`.
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
 +                              return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
 +                      } else {
 +                              return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
 +                      }
 +              }
 +              if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
 +              }
 +              if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
 +                      let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
 +                      return Err((err, code, chan_update_opt));
 +              }
 +
 +              Ok(())
 +      }
 +
 +      /// Executes a callback `C` that returns some value `X` on the channel found with the given
 +      /// `scid`. `None` is returned when the channel is not found.
 +      fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
 +              &self, scid: u64, callback: C,
 +      ) -> Option<X> {
 +              let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
 +                      None => return None,
 +                      Some((cp_id, id)) => (cp_id, id),
 +              };
 +              let per_peer_state = self.per_peer_state.read().unwrap();
 +              let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
 +              if peer_state_mutex_opt.is_none() {
 +                      return None;
 +              }
 +              let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 +              let peer_state = &mut *peer_state_lock;
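 +              // Note that the `short_to_chan_info` and `channel_by_id` maps have no consistency
 +              // guarantees, so the channel may have been removed between the two lookups.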
 +              match peer_state.channel_by_id.get_mut(&channel_id).and_then(
 +                      |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
 +              ) {
 +                      None => None,
 +                      Some(chan) => Some(callback(chan)),
 +              }
 +      }
 +
 +      fn can_forward_htlc(
 +              &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
 +      ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
 +              match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
 +                      self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
 +              }) {
 +                      Some(Ok(())) => {},
 +                      Some(Err(e)) => return Err(e),
 +                      None => {
 +                              // If we couldn't find the channel info for the scid, it may be a phantom or
 +                              // intercept forward.
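 +                              // (Note that this lookup is likely a timing oracle for detecting
 +                              // whether an scid is a phantom or an intercept.)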
 +                              let is_intercept = self.default_configuration.accept_intercept_htlcs &&
 +                                      fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash);
 +                              if !is_intercept &&
 +                                      !fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
 +                              {
 +                                      return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 +                              }
 +                      }
 +              }
 +
 +              let cur_height = self.best_block.read().unwrap().height + 1;
 +              if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
 +                      cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
 +              ) {
 +                      let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
 +                              self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
 +                      }).flatten();
 +                      return Err((err_msg, err_code, chan_update_opt));
 +              }
 +
 +              Ok(())
 +      }
 +
 +      fn htlc_failure_from_update_add_err(
 +              &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
 +              mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
 +              shared_secret: &[u8; 32]
 +      ) -> HTLCFailureMsg {
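 +              // Per BOLT 4, failure codes are 16-bit values whose high bits are flags: BADONION
 +              // (0x8000), PERM (0x4000), NODE (0x2000) and UPDATE (0x1000). Codes with the UPDATE
 +              // flag set are expected to carry the latest `channel_update` for the failing channel.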
 +              let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
 +              if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
 +                      let chan_update = chan_update.unwrap();
 +                      if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
 +                              msg.amount_msat.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      else if err_code == 0x1000 | 13 {
 +                              msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      else if err_code == 0x1000 | 20 {
 +                              // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
 +                              0u16.write(&mut res).expect("Writes cannot fail");
 +                      }
 +                      (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
 +                      msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
 +                      chan_update.write(&mut res).expect("Writes cannot fail");
 +              } else if err_code & 0x1000 == 0x1000 {
 +                      // If we're trying to return an error that requires a `channel_update` but
 +                      // we're forwarding to a phantom or intercept "channel" (i.e. cannot
 +                      // generate an update), just use the generic "temporary_node_failure"
 +                      // instead.
 +                      err_code = 0x2000 | 2;
 +              }
 +
 +              log_info!(
 +                      WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
 +                      "Failed to accept/forward incoming HTLC: {}", err_msg
 +              );
 +              // If `msg.blinding_point` is set, we must always fail with malformed.
 +              if msg.blinding_point.is_some() {
 +                      return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 +                              channel_id: msg.channel_id,
 +                              htlc_id: msg.htlc_id,
 +                              sha256_of_onion: [0; 32],
 +                              failure_code: INVALID_ONION_BLINDING,
 +                      });
 +              }
 +
 +              let (err_code, err_data) = if is_intro_node_blinded_forward {
 +                      (INVALID_ONION_BLINDING, &[0; 32][..])
 +              } else {
 +                      (err_code, &res.0[..])
 +              };
 +              HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 +                      channel_id: msg.channel_id,
 +                      htlc_id: msg.htlc_id,
 +                      reason: HTLCFailReason::reason(err_code, err_data.to_vec())
 +                              .get_encrypted_failure_packet(shared_secret, &None),
 +              })
 +      }
 +
        fn decode_update_add_htlc_onion(
                &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
        ) -> Result<
                        msg, &self.node_signer, &self.logger, &self.secp_ctx
                )?;
  
 -              let is_intro_node_forward = match next_hop {
 -                      onion_utils::Hop::Forward {
 -                              next_hop_data: msgs::InboundOnionPayload::BlindedForward {
 -                                      intro_node_blinding_point: Some(_), ..
 -                              }, ..
 -                      } => true,
 -                      _ => false,
 -              };
 -
 -              macro_rules! return_err {
 -                      ($msg: expr, $err_code: expr, $data: expr) => {
 -                              {
 -                                      log_info!(
 -                                              WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
 -                                              "Failed to accept/forward incoming HTLC: {}", $msg
 -                                      );
 -                                      // If `msg.blinding_point` is set, we must always fail with malformed.
 -                                      if msg.blinding_point.is_some() {
 -                                              return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
 -                                                      channel_id: msg.channel_id,
 -                                                      htlc_id: msg.htlc_id,
 -                                                      sha256_of_onion: [0; 32],
 -                                                      failure_code: INVALID_ONION_BLINDING,
 -                                              }));
 -                                      }
 -
 -                                      let (err_code, err_data) = if is_intro_node_forward {
 -                                              (INVALID_ONION_BLINDING, &[0; 32][..])
 -                                      } else { ($err_code, $data) };
 -                                      return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
 -                                              channel_id: msg.channel_id,
 -                                              htlc_id: msg.htlc_id,
 -                                              reason: HTLCFailReason::reason(err_code, err_data.to_vec())
 -                                                      .get_encrypted_failure_packet(&shared_secret, &None),
 -                                      }));
 -                              }
 -                      }
 -              }
 -
 -              let NextPacketDetails {
 -                      next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
 -              } = match next_packet_details_opt {
 +              let next_packet_details = match next_packet_details_opt {
                        Some(next_packet_details) => next_packet_details,
                        // it is a receive, so no need for outbound checks
                        None => return Ok((next_hop, shared_secret, None)),
  
                // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
                // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
 -              if let Some((err, mut code, chan_update)) = loop {
 -                      let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
 -                      let forwarding_chan_info_opt = match id_option {
 -                              None => { // unknown_next_peer
 -                                      // Note that this is likely a timing oracle for detecting whether an scid is a
 -                                      // phantom or an intercept.
 -                                      if (self.default_configuration.accept_intercept_htlcs &&
 -                                              fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
 -                                              fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
 -                                      {
 -                                              None
 -                                      } else {
 -                                              break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                                      }
 -                              },
 -                              Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
 -                      };
 -                      let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
 -                              let per_peer_state = self.per_peer_state.read().unwrap();
 -                              let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
 -                              if peer_state_mutex_opt.is_none() {
 -                                      break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                              }
 -                              let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 -                              let peer_state = &mut *peer_state_lock;
 -                              let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
 -                                      |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
 -                              ).flatten() {
 -                                      None => {
 -                                              // Channel was removed. The short_to_chan_info and channel_by_id maps
 -                                              // have no consistency guarantees.
 -                                              break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
 -                                      },
 -                                      Some(chan) => chan
 -                              };
 -                              if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
 -                                      // Note that the behavior here should be identical to the above block - we
 -                                      // should NOT reveal the existence or non-existence of a private channel if
 -                                      // we don't allow forwards outbound over them.
 -                                      break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
 -                              }
 -                              if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
 -                                      // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
 -                                      // "refuse to forward unless the SCID alias was used", so we pretend
 -                                      // we don't have the channel here.
 -                                      break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
 -                              }
 -                              let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
 -
 -                              // Note that we could technically not return an error yet here and just hope
 -                              // that the connection is reestablished or monitor updated by the time we get
 -                              // around to doing the actual forward, but better to fail early if we can and
 -                              // hopefully an attacker trying to path-trace payments cannot make this occur
 -                              // on a small/per-node/per-channel scale.
 -                              if !chan.context.is_live() { // channel_disabled
 -                                      // If the channel_update we're going to return is disabled (i.e. the
 -                                      // peer has been disabled for some time), return `channel_disabled`,
 -                                      // otherwise return `temporary_channel_failure`.
 -                                      if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
 -                                              break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
 -                                      } else {
 -                                              break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
 -                                      }
 -                              }
 -                              if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
 -                                      break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
 -                              }
 -                              if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
 -                                      break Some((err, code, chan_update_opt));
 -                              }
 -                              chan_update_opt
 -                      } else {
 -                              None
 -                      };
 -
 -                      let cur_height = self.best_block.read().unwrap().height() + 1;
 -
 -                      if let Err((err_msg, code)) = check_incoming_htlc_cltv(
 -                              cur_height, outgoing_cltv_value, msg.cltv_expiry
 -                      ) {
 -                              if code & 0x1000 != 0 && chan_update_opt.is_none() {
 -                                      // We really should set `incorrect_cltv_expiry` here but as we're not
 -                                      // forwarding over a real channel we can't generate a channel_update
 -                                      // for it. Instead we just return a generic temporary_node_failure.
 -                                      break Some((err_msg, 0x2000 | 2, None))
 -                              }
 -                              let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
 -                              break Some((err_msg, code, chan_update_opt));
 -                      }
 +              self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
 +                      let (err_msg, err_code, chan_update_opt) = e;
 +                      self.htlc_failure_from_update_add_err(
 +                              msg, counterparty_node_id, err_msg, err_code, chan_update_opt,
 +                              next_hop.is_intro_node_blinded_forward(), &shared_secret
 +                      )
 +              })?;
  
 -                      break None;
 -              }
 -              {
 -                      let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
 -                      if let Some(chan_update) = chan_update {
 -                              if code == 0x1000 | 11 || code == 0x1000 | 12 {
 -                                      msg.amount_msat.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              else if code == 0x1000 | 13 {
 -                                      msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              else if code == 0x1000 | 20 {
 -                                      // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
 -                                      0u16.write(&mut res).expect("Writes cannot fail");
 -                              }
 -                              (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
 -                              msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
 -                              chan_update.write(&mut res).expect("Writes cannot fail");
 -                      } else if code & 0x1000 == 0x1000 {
 -                              // If we're trying to return an error that requires a `channel_update` but
 -                              // we're forwarding to a phantom or intercept "channel" (i.e. cannot
 -                              // generate an update), just use the generic "temporary_node_failure"
 -                              // instead.
 -                              code = 0x2000 | 2;
 -                      }
 -                      return_err!(err, code, &res.0[..]);
 -              }
 -              Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
 +              Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
        }
  
        fn construct_pending_htlc_status<'a>(
                match decoded_hop {
                        onion_utils::Hop::Receive(next_hop_data) => {
                                // OUR PAYMENT!
 -                              let current_height: u32 = self.best_block.read().unwrap().height();
 +                              let current_height: u32 = self.best_block.read().unwrap().height;
                                match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
                                        msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
                                        current_height, self.default_configuration.accept_mpp_keysend)
        /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
        pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_with_route(route, payment_hash, recipient_onion, payment_id,
        /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
        /// `route_params` and retry failed payment paths based on `retry_strategy`.
        pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
  
        #[cfg(test)]
        pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
                        keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
  
        #[cfg(test)]
        pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
        }
  
        }
  
        pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_for_bolt12_invoice(
        ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment_with_route(
                        route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
        ///
        /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
        pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
                        payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
        /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
        /// us to easily discern them from real payments.
        pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
                        &self.entropy_source, &self.node_signer, best_block_height,
                                ProbeSendFailure::RouteNotFound
                        })?;
  
 -              let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
 +              let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
  
                let mut res = Vec::new();
  
                        }));
                }
                {
 -                      let height = self.best_block.read().unwrap().height();
 +                      let height = self.best_block.read().unwrap().height;
                        // Transactions are evaluated as final by network mempools if their locktime is strictly
                        // lower than the next block height. However, the modules constituting our Lightning
                        // node might not have perfectly synced views of the blockchain. Thus, if the wallet
                                        }
                                        let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
                                        if let Some(funding_batch_state) = funding_batch_state.as_mut() {
 -                                              funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
 +                                              // TODO(dual_funding): Batch funding is currently limited to V1 channels, so we
 +                                              // derive the channel ID from the funding outpoint here. Supporting V2 batching
 +                                              // will require dropping that reliance on the outpoint.
 +                                              funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
                                        }
                                        Ok(outpoint)
                                })
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
 +
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
                                }
                                if let ChannelPhase::Funded(channel) = channel_phase {
                                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
 -                                              peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
 +                                              let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                              pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
                                        } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: channel.context.get_counterparty_node_id(),
                let mut per_source_pending_forward = [(
                        payment.prev_short_channel_id,
                        payment.prev_funding_outpoint,
 +                      payment.prev_channel_id,
                        payment.prev_user_channel_id,
                        vec![(pending_htlc_info, payment.prev_htlc_id)]
                )];
                                short_channel_id: payment.prev_short_channel_id,
                                user_channel_id: Some(payment.prev_user_channel_id),
                                outpoint: payment.prev_funding_outpoint,
 +                              channel_id: payment.prev_channel_id,
                                htlc_id: payment.prev_htlc_id,
                                incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
                                phantom_shared_secret: None,
                Ok(())
        }
  
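 +      /// Decodes the onions of any HTLCs queued in `decode_update_add_htlcs`, validates them
 +      /// against the incoming and (where applicable) outgoing channels, and queues up the
 +      /// resulting forwards and failures.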
 +      fn process_pending_update_add_htlcs(&self) {
 +              let mut decode_update_add_htlcs = new_hash_map();
 +              mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
 +
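 +              // Maps a failed HTLC to the `HTLCDestination` we report: the next-hop channel when
 +              // the outgoing SCID is known, an unknown next hop when it isn't, and a failed
 +              // payment when the HTLC was a receive.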
 +              let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
 +                      if let Some(outgoing_scid) = outgoing_scid_opt {
 +                              match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
 +                                      Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
 +                                              HTLCDestination::NextHopChannel {
 +                                                      node_id: Some(*outgoing_counterparty_node_id),
 +                                                      channel_id: *outgoing_channel_id,
 +                                              },
 +                                      None => HTLCDestination::UnknownNextHop {
 +                                              requested_forward_scid: outgoing_scid,
 +                                      },
 +                              }
 +                      } else {
 +                              HTLCDestination::FailedPayment { payment_hash }
 +                      }
 +              };
 +
 +              'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
 +                      let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
 +                              let counterparty_node_id = chan.context.get_counterparty_node_id();
 +                              let channel_id = chan.context.channel_id();
 +                              let funding_txo = chan.context.get_funding_txo().unwrap();
 +                              let user_channel_id = chan.context.get_user_id();
 +                              let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
 +                              (counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
 +                      });
 +                      let (
 +                              incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
 +                              incoming_user_channel_id, incoming_accept_underpaying_htlcs
 +                       ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
 +                              incoming_channel_details
 +                      } else {
 +                              // The incoming channel no longer exists; the HTLCs should be resolved onchain instead.
 +                              continue;
 +                      };
 +
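 +                      // Collect the decoded forwards and failures so they can be queued below as a
 +                      // single batch per incoming channel.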
 +                      let mut htlc_forwards = Vec::new();
 +                      let mut htlc_fails = Vec::new();
 +                      for update_add_htlc in &update_add_htlcs {
 +                              let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
 +                                      &update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
 +                              ) {
 +                                      Ok(decoded_onion) => decoded_onion,
 +                                      Err(htlc_fail) => {
 +                                              htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
 +                                              continue;
 +                                      },
 +                              };
 +
 +                              let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
 +                              let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
 +
 +                              // Process the HTLC on the incoming channel.
 +                              match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
 +                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                      chan.can_accept_incoming_htlc(
 +                                              update_add_htlc, &self.fee_estimator, &logger,
 +                                      )
 +                              }) {
 +                                      Some(Ok(_)) => {},
 +                                      Some(Err((err, code))) => {
 +                                              let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
 +                                                      self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
 +                                                              self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
 +                                                      }).flatten()
 +                                              } else {
 +                                                      None
 +                                              };
 +                                              let htlc_fail = self.htlc_failure_from_update_add_err(
 +                                                      &update_add_htlc, &incoming_counterparty_node_id, err, code,
 +                                                      outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
 +                                              );
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                              continue;
 +                                      },
 +                                      // The incoming channel no longer exists; the HTLCs should be resolved onchain instead.
 +                                      None => continue 'outer_loop,
 +                              }
 +
 +                              // Now process the HTLC on the outgoing channel if it's a forward.
 +                              if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
 +                                      if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
 +                                              &update_add_htlc, next_packet_details
 +                                      ) {
 +                                              let htlc_fail = self.htlc_failure_from_update_add_err(
 +                                                      &update_add_htlc, &incoming_counterparty_node_id, err, code,
 +                                                      chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
 +                                              );
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                              continue;
 +                                      }
 +                              }
 +
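 +                              // Both the incoming and, for forwards, the outgoing channel accepted the
 +                              // HTLC, so build its final pending status.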
 +                              match self.construct_pending_htlc_status(
 +                                      &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
 +                                      incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
 +                              ) {
 +                                      PendingHTLCStatus::Forward(htlc_forward) => {
 +                                              htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
 +                                      },
 +                                      PendingHTLCStatus::Fail(htlc_fail) => {
 +                                              let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
 +                                              htlc_fails.push((htlc_fail, htlc_destination));
 +                                      },
 +                              }
 +                      }
 +
 +                      // Process all of the forwards and failures for the channel on which the HTLCs were
 +                      // proposed, as a single batch.
 +                      let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
 +                              incoming_user_channel_id, htlc_forwards.drain(..).collect());
 +                      self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
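 +                      // Queue each failure back on the incoming channel and surface an
 +                      // `HTLCHandlingFailed` event for it.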
 +                      for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
 +                              let failure = match htlc_fail {
 +                                      HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
 +                                              htlc_id: fail_htlc.htlc_id,
 +                                              err_packet: fail_htlc.reason,
 +                                      },
 +                                      HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
 +                                              htlc_id: fail_malformed_htlc.htlc_id,
 +                                              sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
 +                                              failure_code: fail_malformed_htlc.failure_code,
 +                                      },
 +                              };
 +                              self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure);
 +                              self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
 +                                      prev_channel_id: incoming_channel_id,
 +                                      failed_next_destination: htlc_destination,
 +                              }, None));
 +                      }
 +              }
 +      }
 +
        /// Processes HTLCs which are pending waiting on random forward delay.
        ///
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
        pub fn process_pending_htlc_forwards(&self) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
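 +              // Resolve any HTLCs which are pending onion decode first so that their forwards and
 +              // failures are handled in this pass as well.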
 +              self.process_pending_update_add_htlcs();
 +
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
 -              let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
 +              let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                {
 -                      let mut forward_htlcs = HashMap::new();
 +                      let mut forward_htlcs = new_hash_map();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
  
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 -                                                                              forward_info: PendingHTLCInfo {
 +                                                                              prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
 +                                                                              prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                        outgoing_cltv_value, ..
                                                                                }
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
 -                                                                                              let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
 +                                                                                              let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
                                                                                                log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
  
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
                                                                                                        user_channel_id: Some(prev_user_channel_id),
 +                                                                                                      channel_id: prev_channel_id,
                                                                                                        outpoint: prev_funding_outpoint,
                                                                                                        htlc_id: prev_htlc_id,
                                                                                                        incoming_packet_shared_secret: incoming_shared_secret,
                                                                                                };
                                                                                                match next_hop {
                                                                                                        onion_utils::Hop::Receive(hop_data) => {
 -                                                                                                              let current_height: u32 = self.best_block.read().unwrap().height();
 +                                                                                                              let current_height: u32 = self.best_block.read().unwrap().height;
                                                                                                                match create_recv_pending_htlc_info(hop_data,
                                                                                                                        incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                                                        outgoing_cltv_value, Some(phantom_shared_secret), false, None,
                                                                                                                        current_height, self.default_configuration.accept_mpp_keysend)
                                                                                                                {
 -                                                                                                                      Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
 +                                                                                                                      Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
                                                                                                                        Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
                                                                                                        },
                                                for forward_info in pending_forwards.drain(..) {
                                                        let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                                      prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 -                                                                      forward_info: PendingHTLCInfo {
 +                                                                      prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
 +                                                                      prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
                                                                                routing: PendingHTLCRouting::Forward {
                                                                                        onion_packet, blinded, ..
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
 +                                                                              channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                              prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 -                                                              forward_info: PendingHTLCInfo {
 +                                                              prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
 +                                                              prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
                                                                        skimmed_fee_msat, ..
                                                                }
                                                                let blinded_failure = routing.blinded_failure();
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
                                                                        PendingHTLCRouting::Receive {
 -                                                                              payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
 -                                                                              custom_tlvs, requires_blinded_error: _
 +                                                                              payment_data, payment_metadata, payment_context,
 +                                                                              incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
 +                                                                              requires_blinded_error: _
                                                                        } => {
                                                                                let _legacy_hop_data = Some(payment_data.clone());
                                                                                let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
                                                                                                payment_metadata, custom_tlvs };
 -                                                                              (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
 +                                                                              (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data, payment_context },
                                                                                        Some(payment_data), phantom_shared_secret, onion_fields)
                                                                        },
 -                                                                      PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
 +                                                                      PendingHTLCRouting::ReceiveKeysend {
 +                                                                              payment_data, payment_preimage, payment_metadata,
 +                                                                              incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
 +                                                                      } => {
                                                                                let onion_fields = RecipientOnionFields {
                                                                                        payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
                                                                                        payment_metadata,
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
 +                                                                              channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                debug_assert!(!committed_to_claimable);
                                                                                let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
 -                                                                                      &self.best_block.read().unwrap().height().to_be_bytes(),
 +                                                                                      &self.best_block.read().unwrap().height.to_be_bytes(),
                                                                                );
                                                                                failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                short_channel_id: $htlc.prev_hop.short_channel_id,
                                                                                                user_channel_id: $htlc.prev_hop.user_channel_id,
 +                                                                                              channel_id: prev_channel_id,
                                                                                                outpoint: prev_funding_outpoint,
                                                                                                htlc_id: $htlc.prev_hop.htlc_id,
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                macro_rules! check_total_value {
                                                                        ($purpose: expr) => {{
                                                                                let mut payment_claimable_generated = false;
 -                                                                              let is_keysend = match $purpose {
 -                                                                                      events::PaymentPurpose::SpontaneousPayment(_) => true,
 -                                                                                      events::PaymentPurpose::InvoicePayment { .. } => false,
 -                                                                              };
 +                                                                              let is_keysend = $purpose.is_keysend();
                                                                                let mut claimable_payments = self.claimable_payments.lock().unwrap();
                                                                                if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                        #[allow(unused_assignments)] {
                                                                                                committed_to_claimable = true;
                                                                                        }
 -                                                                                      let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                        htlcs.push(claimable_htlc);
                                                                                        let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
                                                                                        htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
                                                                match payment_secrets.entry(payment_hash) {
                                                                        hash_map::Entry::Vacant(_) => {
                                                                                match claimable_htlc.onion_payload {
 -                                                                                      OnionPayload::Invoice { .. } => {
 +                                                                                      OnionPayload::Invoice { ref payment_context, .. } => {
                                                                                                let payment_data = payment_data.unwrap();
                                                                                                let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(result) => result,
                                                                                                        }
                                                                                                };
                                                                                                if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
 -                                                                                                      let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
 +                                                                                                      let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
                                                                                                        if (cltv_expiry as u64) < expected_min_expiry_height {
                                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
                                                                                                                        &payment_hash, cltv_expiry, expected_min_expiry_height);
                                                                                                                fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
 -                                                                                              let purpose = events::PaymentPurpose::InvoicePayment {
 -                                                                                                      payment_preimage: payment_preimage.clone(),
 -                                                                                                      payment_secret: payment_data.payment_secret,
 -                                                                                              };
 +                                                                                              let purpose = events::PaymentPurpose::from_parts(
 +                                                                                                      payment_preimage.clone(),
 +                                                                                                      payment_data.payment_secret,
 +                                                                                                      payment_context.clone(),
 +                                                                                              );
                                                                                                check_total_value!(purpose);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
 -                                                                              if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
 -                                                                                      log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash);
 -                                                                                      fail_htlc!(claimable_htlc, payment_hash);
 -                                                                              }
 +                                                                              let payment_context = match claimable_htlc.onion_payload {
 +                                                                                      OnionPayload::Spontaneous(_) => {
 +                                                                                              log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", &payment_hash);
 +                                                                                              fail_htlc!(claimable_htlc, payment_hash);
 +                                                                                      },
 +                                                                                      OnionPayload::Invoice { ref payment_context, .. } => payment_context,
 +                                                                              };
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", &payment_hash);
                                                                                                &payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
 -                                                                                      let purpose = events::PaymentPurpose::InvoicePayment {
 -                                                                                              payment_preimage: inbound_payment.get().payment_preimage,
 -                                                                                              payment_secret: payment_data.payment_secret,
 -                                                                                      };
 +                                                                                      let purpose = events::PaymentPurpose::from_parts(
 +                                                                                              inbound_payment.get().payment_preimage,
 +                                                                                              payment_data.payment_secret,
 +                                                                                              payment_context.clone(),
 +                                                                                      );
                                                                                        let payment_claimable_generated = check_total_value!(purpose);
                                                                                        if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
                        }
                }
  
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
                        || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
                        &self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
  
                for event in background_events.drain(..) {
                        match event {
 -                              BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
 +                              BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
                                        // The channel has already been closed, so there's no need to track whether the
                                        // monitor update completes.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                },
 -                              BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
 +                              BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
                                        let mut updated_chan = false;
                                        {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
 -                                                      match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
 +                                                      match peer_state.channel_by_id.entry(channel_id) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
                                                                                updated_chan = true;
  
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
 -                      if new_feerate != chan.context.get_feerate_sat_per_1000_weight() {
 -                              log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {}.",
 -                              chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
 -                      }
                        return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
                                                                                if n >= DISABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                                              let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                                                                              pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
                                                                                if n >= ENABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                                              let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                                                                              pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
                                                                process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
                                                                        pending_msg_events, counterparty_node_id)
                                                        },
 +                                                      #[cfg(any(dual_funding, splicing))]
 +                                                      ChannelPhase::UnfundedInboundV2(chan) => {
 +                                                              process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
 +                                                                      pending_msg_events, counterparty_node_id)
 +                                                      },
 +                                                      #[cfg(any(dual_funding, splicing))]
 +                                                      ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                              process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
 +                                                                      pending_msg_events, counterparty_node_id)
 +                                                      },
                                                }
                                        });
  
                        FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
                        FailureCode::IncorrectOrUnknownPaymentDetails => {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 -                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
 +                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
                                HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
                        },
                        FailureCode::InvalidOnionPayload(data) => {
                }
        }
  
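 +      /// Fails an HTLC backwards to its sender, pushing a `PendingHTLCsForwardable` event if one
 +      /// is needed.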
 +      fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 +              let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
 +              if push_forward_event { self.push_pending_forwards_ev(); }
 +      }
 +
        /// Fails an HTLC backwards to the peer which sent it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 -      fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
 +      fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
                // Ensure that no peer state channel storage lock is held when calling this function.
                // This ensures that future code doesn't introduce a lock-order requirement for
                // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
 +              let mut push_forward_event;
                match source {
                        HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
 -                              if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
 +                              push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
                                        session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
 -                                      &self.pending_events, &self.logger)
 -                              { self.push_pending_forwards_ev(); }
 +                                      &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
 -                              ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
 +                              ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
                        }) => {
                                log_trace!(
 -                                      WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
 +                                      WithContext::from(&self.logger, None, Some(*channel_id)),
                                        "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
                                        if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
                                );
                                        }
                                };
  
 -                              let mut push_forward_ev = false;
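 +                              // Only queue a `PendingHTLCsForwardable` event if both the decode queue and
 +                              // `forward_htlcs` were empty before this failure, i.e. no event is already
 +                              // outstanding for this batch.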
 +                              push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
 -                              if forward_htlcs.is_empty() {
 -                                      push_forward_ev = true;
 -                              }
 +                              push_forward_event &= forward_htlcs.is_empty();
                                match forward_htlcs.entry(*short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                entry.get_mut().push(failure);
                                        }
                                }
                                mem::drop(forward_htlcs);
 -                              if push_forward_ev { self.push_pending_forwards_ev(); }
                                let mut pending_events = self.pending_events.lock().unwrap();
                                pending_events.push_back((events::Event::HTLCHandlingFailed {
 -                                      prev_channel_id: outpoint.to_channel_id(),
 +                                      prev_channel_id: *channel_id,
                                        failed_next_destination: destination,
                                }, None));
                        },
                }
 +              push_forward_event
        }
  
        /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
 -                              let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
 +                              let prev_hop_chan_id = htlc.prev_hop.channel_id;
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
                if !valid_mpp {
                        for htlc in sources.drain(..) {
                                let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 -                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
 +                              htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
                                let source = HTLCSource::PreviousHopData(htlc.prev_hop);
                                let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
                                let receiver = HTLCDestination::FailedPayment { payment_hash };
  
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 -                      let chan_id = prev_hop.outpoint.to_channel_id();
 +                      let chan_id = prev_hop.channel_id;
                        let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                                Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
                                None => None
                                                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                                                        counterparty_node_id,
                                                                                        funding_txo: prev_hop.outpoint,
 +                                                                                      channel_id: prev_hop.channel_id,
                                                                                        update: monitor_update.clone(),
                                                                                });
                                                                }
  
                                                                log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                                                                        chan_id, action);
 -                                                              let (node_id, funding_outpoint, blocker) =
 +                                                              let (node_id, _funding_outpoint, channel_id, blocker) =
                                                                if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: node_id,
                                                                        downstream_funding_outpoint: funding_outpoint,
 -                                                                      blocking_action: blocker,
 +                                                                      blocking_action: blocker, downstream_channel_id: channel_id,
                                                                } = action {
 -                                                                      (node_id, funding_outpoint, blocker)
 +                                                                      (node_id, funding_outpoint, channel_id, blocker)
                                                                } else {
                                                                        debug_assert!(false,
                                                                                "Duplicate claims should always free another channel immediately");
                                                                        let mut peer_state = peer_state_mtx.lock().unwrap();
                                                                        if let Some(blockers) = peer_state
                                                                                .actions_blocking_raa_monitor_updates
 -                                                                              .get_mut(&funding_outpoint.to_channel_id())
 +                                                                              .get_mut(&channel_id)
                                                                        {
                                                                                let mut found_blocker = false;
                                                                                blockers.retain(|iter| {
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage,
                        }],
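 +                      // Monitor updates now carry the channel ID explicitly rather than deriving it
 +                      // from the funding outpoint.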
 +                      channel_id: Some(prev_hop.channel_id),
                };
  
                if !during_init {
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must have an ability to receive the same event and try
                                // again on restart.
 -                              log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
 +                              log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
 +                                      "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
                } else {
                        // complete the monitor update completion action from `completion_action`.
                        self.pending_background_events.lock().unwrap().push(
                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
 -                                      prev_hop.outpoint, preimage_update,
 +                                      prev_hop.outpoint, prev_hop.channel_id, preimage_update,
                                )));
                }
                // Note that we do process the completion action here. This totally could be a
        }
  
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
 -              forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
 -              next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
 +              forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
 +              startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
 +              next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
        ) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
                                        debug_assert_eq!(pubkey, path.hops[0].pubkey);
                                }
                                let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 -                                      channel_funding_outpoint: next_channel_outpoint,
 +                                      channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
                                        counterparty_node_id: path.hops[0].pubkey,
                                };
                                self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
                                        &self.logger);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
 -                              let prev_outpoint = hop_data.outpoint;
 +                              let prev_channel_id = hop_data.channel_id;
 +                              let prev_user_channel_id = hop_data.user_channel_id;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
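 +                              // Completing this blocker once the upstream claim is durably persisted will
 +                              // unblock RAA monitor updates on the downstream channel.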
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                        if let Some(node_id) = next_channel_counterparty_node_id {
 -                                                              Some((node_id, next_channel_outpoint, completed_blocker))
 +                                                              Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
                                                        } else {
                                                                // We can only get `None` here if we are processing a
                                                                // `ChannelMonitor`-originated event, in which case we
                                                                                },
                                                                                // or the channel we'd unblock is already closed,
                                                                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
 -                                                                                      (funding_txo, monitor_update)
 +                                                                                      (funding_txo, _channel_id, monitor_update)
                                                                                ) => {
                                                                                        if *funding_txo == next_channel_outpoint {
                                                                                                assert_eq!(monitor_update.updates.len(), 1);
                                                                                BackgroundEvent::MonitorUpdatesComplete {
                                                                                        channel_id, ..
                                                                                } =>
 -                                                                                      *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
 +                                                                                      *channel_id == prev_channel_id,
                                                                        }
                                                                }), "{:?}", *background_events);
                                                        }
                                                                Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: other_chan.0,
                                                                        downstream_funding_outpoint: other_chan.1,
 -                                                                      blocking_action: other_chan.2,
 +                                                                      downstream_channel_id: other_chan.2,
 +                                                                      blocking_action: other_chan.3,
                                                                })
                                                        } else { None }
                                                } else {
 -                                                      let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 +                                                      let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
                                                                if let Some(claimed_htlc_value) = htlc_claim_value_msat {
                                                                        Some(claimed_htlc_value - forwarded_htlc_value)
                                                                } else { None }
                                                        } else { None };
 +                                                      debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
 +                                                              "skimmed_fee_msat must always be included in total_fee_earned_msat");
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
 -                                                                      fee_earned_msat,
 +                                                                      prev_channel_id: Some(prev_channel_id),
 +                                                                      next_channel_id: Some(next_channel_id),
 +                                                                      prev_user_channel_id,
 +                                                                      next_user_channel_id,
 +                                                                      total_fee_earned_msat,
 +                                                                      skimmed_fee_msat,
                                                                        claim_from_onchain_tx: from_onchain,
 -                                                                      prev_channel_id: Some(prev_outpoint.to_channel_id()),
 -                                                                      next_channel_id: Some(next_channel_outpoint.to_channel_id()),
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                                                                },
                                                                downstream_counterparty_and_funding_outpoint: chan_to_release,
                                        event, downstream_counterparty_and_funding_outpoint
                                } => {
                                        self.pending_events.lock().unwrap().push_back((event, None));
 -                                      if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
 -                                              self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
 +                                      if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
 +                                              self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
                                        }
                                },
                                MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
 -                                      downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
 +                                      downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
                                } => {
                                        self.handle_monitor_update_release(
                                                downstream_counterparty_node_id,
                                                downstream_funding_outpoint,
 +                                              downstream_channel_id,
                                                Some(blocking_action),
                                        );
                                },
        fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
                channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
 -              pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 +              pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 +              funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 -      -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
 +      -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
                let logger = WithChannelContext::from(&self.logger, &channel.context);
 -              log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
 +              log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
 -                      if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
 +                      if commitment_update.is_some() { "a" } else { "no" },
 +                      pending_forwards.len(), pending_update_adds.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
                        if channel_ready.is_some() { "sending" } else { "without" },
                        if announcement_sigs.is_some() { "sending" } else { "without" });
  
 -              let mut htlc_forwards = None;
 -
                let counterparty_node_id = channel.context.get_counterparty_node_id();
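 +              // Use the real SCID once the funding is confirmed; otherwise fall back to our
 +              // outbound alias (e.g. for unconfirmed/0conf channels).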
 +              let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
 +
 +              let mut htlc_forwards = None;
                if !pending_forwards.is_empty() {
 -                      htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
 -                              channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
 +                      htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
 +                              channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
 +              }
 +              let mut decode_update_add_htlcs = None;
 +              if !pending_update_adds.is_empty() {
 +                      decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
                }
  
                if let Some(msg) = channel_ready {
                        emit_channel_ready_event!(pending_events, channel);
                }
  
 -              htlc_forwards
 +              (htlc_forwards, decode_update_add_htlcs)
        }
  
 -      fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
 +      fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
  
                let counterparty_node_id = match counterparty_node_id {
                                // TODO: Once we can rely on the counterparty_node_id from the
                                // monitor event, this and the outpoint_to_peer map should be removed.
                                let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
 -                              match outpoint_to_peer.get(&funding_txo) {
 +                              match outpoint_to_peer.get(funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
 -                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
 +                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
 -                                      .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
 +                                      .remove(channel_id).unwrap_or(Vec::new());
                                mem::drop(peer_state_lock);
                                mem::drop(per_peer_state);
                                self.handle_monitor_update_completion_actions(update_actions);
                // happening and return an error. N.B. that we create channel with an outbound SCID of zero so
                // that we can delay allocating the SCID until after we're sure that the checks below will
                // succeed.
 -              let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
 +              let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
                        Some(unaccepted_channel) => {
 -                              let best_block_height = self.best_block.read().unwrap().height();
 +                              let best_block_height = self.best_block.read().unwrap().height;
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
 -                                      &self.logger, accept_0conf).map_err(|e| {
 -                                              let err_str = e.to_string();
 -                                              log_error!(logger, "{}", err_str);
 -
 -                                              APIError::ChannelUnavailable { err: err_str }
 -                                      })
 -                              }
 +                                      &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
 +                      },
                        _ => {
                                let err_str = "No such channel awaiting to be accepted.".to_owned();
                                log_error!(logger, "{}", err_str);
  
 -                              Err(APIError::APIMisuseError { err: err_str })
 +                              return Err(APIError::APIMisuseError { err: err_str });
 +                      }
 +              };
 +
 +              match res {
 +                      Err(err) => {
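 +                              // `handle_error!` requires that no peer state locks are held, so release
 +                              // them before surfacing the error to the peer.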
 +                              mem::drop(peer_state_lock);
 +                              mem::drop(per_peer_state);
 +                              match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
 +                                      Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
 +                                      Err(e) => {
 +                                              return Err(APIError::ChannelUnavailable { err: e.err });
 +                                      },
 +                              }
                        }
 -              }?;
 +                      Ok(mut channel) => {
 +                              if accept_0conf {
 +                                      // This should have been correctly configured by the call to InboundV1Channel::new.
 +                                      debug_assert!(channel.context.minimum_depth().unwrap() == 0);
 +                              } else if channel.context.get_channel_type().requires_zero_conf() {
 +                                      let send_msg_err_event = events::MessageSendEvent::HandleError {
 +                                              node_id: channel.context.get_counterparty_node_id(),
 +                                              action: msgs::ErrorAction::SendErrorMessage{
 +                                                      msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
 +                                              }
 +                                      };
 +                                      peer_state.pending_msg_events.push(send_msg_err_event);
 +                                      let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
 +                                      log_error!(logger, "{}", err_str);
  
 -              if accept_0conf {
 -                      // This should have been correctly configured by the call to InboundV1Channel::new.
 -                      debug_assert!(channel.context.minimum_depth().unwrap() == 0);
 -              } else if channel.context.get_channel_type().requires_zero_conf() {
 -                      let send_msg_err_event = events::MessageSendEvent::HandleError {
 -                              node_id: channel.context.get_counterparty_node_id(),
 -                              action: msgs::ErrorAction::SendErrorMessage{
 -                                      msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
 -                              }
 -                      };
 -                      peer_state.pending_msg_events.push(send_msg_err_event);
 -                      let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
 -                      log_error!(logger, "{}", err_str);
 +                                      return Err(APIError::APIMisuseError { err: err_str });
 +                              } else {
 +                                      // If this peer already has some channels, a new channel won't increase our number of peers
 +                                      // with unfunded channels, so as long as we aren't over the maximum number of unfunded
 +                                      // channels per-peer we can accept channels from a peer with existing ones.
 +                                      if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 +                                              let send_msg_err_event = events::MessageSendEvent::HandleError {
 +                                                      node_id: channel.context.get_counterparty_node_id(),
 +                                                      action: msgs::ErrorAction::SendErrorMessage{
 +                                                              msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
 +                                                      }
 +                                              };
 +                                              peer_state.pending_msg_events.push(send_msg_err_event);
 +                                              let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 +                                              log_error!(logger, "{}", err_str);
  
 -                      return Err(APIError::APIMisuseError { err: err_str });
 -              } else {
 -                      // If this peer already has some channels, a new channel won't increase our number of peers
 -                      // with unfunded channels, so as long as we aren't over the maximum number of unfunded
 -                      // channels per-peer we can accept channels from a peer with existing ones.
 -                      if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
 -                              let send_msg_err_event = events::MessageSendEvent::HandleError {
 -                                      node_id: channel.context.get_counterparty_node_id(),
 -                                      action: msgs::ErrorAction::SendErrorMessage{
 -                                              msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
 +                                              return Err(APIError::APIMisuseError { err: err_str });
                                        }
 -                              };
 -                              peer_state.pending_msg_events.push(send_msg_err_event);
 -                              let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
 -                              log_error!(logger, "{}", err_str);
 -
 -                              return Err(APIError::APIMisuseError { err: err_str });
 -                      }
 -              }
 +                              }
  
 -              // Now that we know we have a channel, assign an outbound SCID alias.
 -              let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 -              channel.context.set_outbound_scid_alias(outbound_scid_alias);
 +                              // Now that we know we have a channel, assign an outbound SCID alias.
 +                              let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 +                              channel.context.set_outbound_scid_alias(outbound_scid_alias);
  
 -              peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
 -                      node_id: channel.context.get_counterparty_node_id(),
 -                      msg: channel.accept_inbound_channel(),
 -              });
 +                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
 +                                      node_id: channel.context.get_counterparty_node_id(),
 +                                      msg: channel.accept_inbound_channel(),
 +                              });
  
 -              peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
 +                              peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
  
 -              Ok(())
 +                              Ok(())
 +                      },
 +              }
        }
  
        /// Gets the number of peers which match the given filter and do not have any funded, outbound,
        fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
        where Filter: Fn(&PeerState<SP>) -> bool {
                let mut peers_without_funded_channels = 0;
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                {
                        let peer_state_lock = self.per_peer_state.read().unwrap();
                        for (_, peer_mtx) in peer_state_lock.iter() {
                                                num_unfunded_channels += 1;
                                        }
                                },
 +                              // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
 +                              #[cfg(any(dual_funding, splicing))]
 +                              ChannelPhase::UnfundedInboundV2(chan) => {
 +                                      // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
 +                                      // included in the unfunded count.
 +                                      if chan.context.minimum_depth().unwrap_or(1) != 0 &&
 +                                              chan.dual_funding_context.our_funding_satoshis == 0 {
 +                                              num_unfunded_channels += 1;
 +                                      }
 +                              },
                                ChannelPhase::UnfundedOutboundV1(_) => {
                                        // Outbound channels don't contribute to the unfunded count in the DoS context.
                                        continue;
 +                              },
 +                              // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
 +                              #[cfg(any(dual_funding, splicing))]
 +                              ChannelPhase::UnfundedOutboundV2(_) => {
 +                                      // Outbound channels don't contribute to the unfunded count in the DoS context.
 +                                      continue;
                                }
                        }
                }
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
                // likely to be lost on restart!
 -              if msg.chain_hash != self.chain_hash {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
 +              if msg.common_fields.chain_hash != self.chain_hash {
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
 +                               msg.common_fields.temporary_channel_id.clone()));
                }
  
                if !self.default_configuration.accept_inbound_channels {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
 +                               msg.common_fields.temporary_channel_id.clone()));
                }
  
                // Get the number of peers with channels, but without funded ones. We don't care too much
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                    .ok_or_else(|| {
                                debug_assert!(false);
 -                              MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
 +                              MsgHandleErrInternal::send_err_msg_no_close(
 +                                      format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
 +                                      msg.common_fields.temporary_channel_id.clone())
                        })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
 -                              msg.temporary_channel_id.clone()));
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
 -                              msg.temporary_channel_id.clone()));
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
 -              let channel_id = msg.temporary_channel_id;
 +              let channel_id = msg.common_fields.temporary_channel_id;
                let channel_exists = peer_state.has_channel(&channel_id);
                if channel_exists {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "temporary_channel_id collision for the same peer!".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
                        let channel_type = channel::channel_type_from_open_channel(
 -                                      &msg, &peer_state.latest_features, &self.channel_type_features()
 +                                      &msg.common_fields, &peer_state.latest_features, &self.channel_type_features()
                                ).map_err(|e|
 -                                      MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
 +                                      MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id)
                                )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
 -                              temporary_channel_id: msg.temporary_channel_id.clone(),
 +                              temporary_channel_id: msg.common_fields.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
 -                              funding_satoshis: msg.funding_satoshis,
 +                              funding_satoshis: msg.common_fields.funding_satoshis,
                                push_msat: msg.push_msat,
                                channel_type,
                        }, None));
                        &self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false)
                {
                        Err(e) => {
 -                              return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
 +                              return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id));
                        },
                        Ok(res) => res
                };
  
                let channel_type = channel.context.get_channel_type();
                if channel_type.requires_zero_conf() {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "No zero confirmation channels accepted".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
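 +              // Anchor-output channels are only accepted via `manually_accept_inbound_channels`,
 +              // since the acceptor must keep spendable UTXOs on hand for fee-bumping.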
                if channel_type.requires_anchors_zero_fee_htlc_tx() {
 -                      return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), msg.temporary_channel_id.clone()));
 +                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                              "No channels with anchor outputs accepted".to_owned(),
 +                              msg.common_fields.temporary_channel_id.clone()));
                }
  
                let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
 -                                      MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
 +                                      MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
                                })?;
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
 -                      match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
 +                      match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut phase) => {
                                        match phase.get_mut() {
                                                ChannelPhase::UnfundedOutboundV1(chan) => {
                                                        (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
                                                },
                                                _ => {
 -                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
 +                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
                                                }
                                        }
                                },
 -                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
 +                              hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
                pending_events.push_back((events::Event::FundingGenerationReady {
 -                      temporary_channel_id: msg.temporary_channel_id,
 +                      temporary_channel_id: msg.common_fields.temporary_channel_id,
                        counterparty_node_id: *counterparty_node_id,
                        channel_value_satoshis: value,
                        output_script,
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
                                                finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
 +                                      // TODO(dual_funding): Combine this match arm with above.
 +                                      #[cfg(any(dual_funding, splicing))]
 +                                      ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
 +                                              let context = phase.context_mut();
 +                                              let logger = WithChannelContext::from(&self.logger, context);
 +                                              log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
 +                                              let mut chan = remove_channel_phase!(self, chan_phase_entry);
 +                                              finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
 +                                      },
                                }
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
                if let Some(ChannelPhase::Funded(chan)) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                              let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 -                              let peer_state = &mut *peer_state_lock;
 -                              peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                              let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                              pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                });
                        }
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 -                                      let pending_forward_info = match decoded_hop_res {
 +                                      let mut pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
                                                        self.construct_pending_htlc_status(
                                                                msg, counterparty_node_id, shared_secret, next_hop,
                                                        ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
 -                                      let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
 +                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                      // If the update_add is completely bogus, the call will Err and we will close,
 +                                      // but if we've sent a shutdown and they haven't acknowledged it yet, we just
 +                                      // want to reject the new HTLC and fail it backwards instead of forwarding.
 +                                      if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
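 +                                              // Per route blinding semantics, failures inside a blinded path use
 +                                              // `invalid_onion_blinding` with an all-zero `sha256_of_onion` so the
 +                                              // failing hop is not revealed.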
                                                if msg.blinding_point.is_some() {
 -                                                      return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
 -                                                                      msgs::UpdateFailMalformedHTLC {
 -                                                                              channel_id: msg.channel_id,
 -                                                                              htlc_id: msg.htlc_id,
 -                                                                              sha256_of_onion: [0; 32],
 -                                                                              failure_code: INVALID_ONION_BLINDING,
 -                                                                      }
 -                                                      ))
 -                                              }
 -                                              // If the update_add is completely bogus, the call will Err and we will close,
 -                                              // but if we've sent a shutdown and they haven't acknowledged it yet, we just
 -                                              // want to reject the new HTLC and fail it backwards instead of forwarding.
 -                                              match pending_forward_info {
 -                                                      PendingHTLCStatus::Forward(PendingHTLCInfo {
 -                                                              ref incoming_shared_secret, ref routing, ..
 -                                                      }) => {
 -                                                              let reason = if routing.blinded_failure().is_some() {
 -                                                                      HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
 -                                                              } else if (error_code & 0x1000) != 0 {
 -                                                                      let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
 -                                                                      HTLCFailReason::reason(real_code, error_data)
 -                                                              } else {
 -                                                                      HTLCFailReason::from_failure_code(error_code)
 -                                                              }.get_encrypted_failure_packet(incoming_shared_secret, &None);
 -                                                              let msg = msgs::UpdateFailHTLC {
 +                                                      pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
 +                                                              msgs::UpdateFailMalformedHTLC {
                                                                        channel_id: msg.channel_id,
                                                                        htlc_id: msg.htlc_id,
 -                                                                      reason
 -                                                              };
 -                                                              PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
 -                                                      },
 -                                                      _ => pending_forward_info
 +                                                                      sha256_of_onion: [0; 32],
 +                                                                      failure_code: INVALID_ONION_BLINDING,
 +                                                              }
 +                                                      ))
 +                                              } else {
 +                                                      match pending_forward_info {
 +                                                              PendingHTLCStatus::Forward(PendingHTLCInfo {
 +                                                                      ref incoming_shared_secret, ref routing, ..
 +                                                              }) => {
 +                                                                      let reason = if routing.blinded_failure().is_some() {
 +                                                                              HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
 +                                                                      } else if (error_code & 0x1000) != 0 {
 +                                                                              let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
 +                                                                              HTLCFailReason::reason(real_code, error_data)
 +                                                                      } else {
 +                                                                              HTLCFailReason::from_failure_code(error_code)
 +                                                                      }.get_encrypted_failure_packet(incoming_shared_secret, &None);
 +                                                                      let msg = msgs::UpdateFailHTLC {
 +                                                                              channel_id: msg.channel_id,
 +                                                                              htlc_id: msg.htlc_id,
 +                                                                              reason
 +                                                                      };
 +                                                                      pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
 +                                                              },
 +                                                              _ => {},
 +                                                      }
                                                }
 -                                      };
 -                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 -                                      try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
 +                                      }
 +                                      try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
  
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let funding_txo;
 -              let (htlc_source, forwarded_htlc_value) = {
 +              let next_user_channel_id;
 +              let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                // outbound HTLC is claimed. This is guaranteed to all complete before we
                                                // process the RAA as messages are processed from single peers serially.
                                                funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
 +                                              next_user_channel_id = chan.context.get_user_id();
                                                res
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
 -              self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
 +              self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
 +                      Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
 +                      funding_txo, msg.channel_id, Some(next_user_channel_id),
 +              );
 +
                Ok(())
        }
  
                }
        }
  
 +      fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
 +              let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
 +              let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
 +              push_forward_event &= decode_update_add_htlcs.is_empty();
 +              let scid = update_add_htlcs.0;
 +              match decode_update_add_htlcs.entry(scid) {
 +                      hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
 +                      hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
 +              }
 +              if push_forward_event { self.push_pending_forwards_ev(); }
 +      }
 +
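`push_decode_update_add_htlcs` above only asks for a `PendingHTLCsForwardable` wakeup when both HTLC queues were empty before the push, so a burst of updates coalesces into a single event. A self-contained sketch of that idiom, with `u32` standing in for the queued HTLC type:

    use std::collections::HashMap;
    use std::sync::Mutex;

    struct Queues {
        forward_htlcs: Mutex<HashMap<u64, Vec<u32>>>,
        decode_update_add_htlcs: Mutex<HashMap<u64, Vec<u32>>>,
    }

    impl Queues {
        // Returns true iff the caller should now generate the (single) wakeup event.
        fn push(&self, scid: u64, mut htlcs: Vec<u32>) -> bool {
            // Snapshot emptiness *before* mutating: a wakeup is only needed on the
            // empty -> non-empty transition; otherwise one is already outstanding.
            let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
            let mut decode = self.decode_update_add_htlcs.lock().unwrap();
            push_forward_event &= decode.is_empty();
            decode.entry(scid).or_default().append(&mut htlcs);
            push_forward_event
        }
    }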
 +      #[inline]
 +      fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
 +              let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
 +              if push_forward_event { self.push_pending_forwards_ev() }
 +      }
 +
        #[inline]
 -      fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
 -              for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
 -                      let mut push_forward_event = false;
 +      fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
 +              let mut push_forward_event = false;
 +              for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut new_intercept_events = VecDeque::new();
                        let mut failed_intercept_forwards = Vec::new();
                        if !pending_forwards.is_empty() {
                                        // Pull this now to avoid introducing a lock order with `forward_htlcs`.
                                        let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
  
 +                                      let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                        let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
                                        let forward_htlcs_empty = forward_htlcs.is_empty();
                                        match forward_htlcs.entry(scid) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                              prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
 +                                                              prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
                                                                                        intercept_id
                                                                                }, None));
                                                                                entry.insert(PendingAddHTLCInfo {
 -                                                                                      prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
 +                                                                                      prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
 -                                                                              let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
 +                                                                              let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
                                                                                log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
                                                                                        user_channel_id: Some(prev_user_channel_id),
                                                                                        outpoint: prev_funding_outpoint,
 +                                                                                      channel_id: prev_channel_id,
                                                                                        htlc_id: prev_htlc_id,
                                                                                        incoming_packet_shared_secret: forward_info.incoming_shared_secret,
                                                                                        phantom_shared_secret: None,
                                                        } else {
                                                                // We don't want to generate a PendingHTLCsForwardable event if only intercepted
                                                                // payments are being processed.
 -                                                              if forward_htlcs_empty {
 -                                                                      push_forward_event = true;
 -                                                              }
 +                                                              push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
                                                                entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
 -                                                                      prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
 +                                                                      prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
                                                        }
                                                }
                                        }
                        }
  
                        for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
 -                              self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
 +                              push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
                        }
  
                        if !new_intercept_events.is_empty() {
                                let mut events = self.pending_events.lock().unwrap();
                                events.append(&mut new_intercept_events);
                        }
 -                      if push_forward_event { self.push_pending_forwards_ev() }
                }
 +              push_forward_event
        }
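The split into `forward_htlcs` and `forward_htlcs_without_forward_event` lets the event decision be accumulated across all sources and emitted once at the end, where the removed code fired `push_pending_forwards_ev` from inside the per-source loop. Sketched abstractly, with hypothetical free functions rather than LDK's signatures:

    // Hypothetical sketch of the batch-then-notify pattern.
    fn forward_all(sources: &mut [Vec<u64>]) {
        let mut push_forward_event = false;
        for source in sources.iter_mut() {
            // The `_without_forward_event` core reports whether a wakeup is
            // needed instead of generating one itself.
            push_forward_event |= forward_one_without_event(source);
        }
        if push_forward_event {
            push_pending_forwards_ev(); // at most one event for the whole batch
        }
    }

    fn forward_one_without_event(source: &mut Vec<u64>) -> bool {
        let had_work = !source.is_empty();
        source.clear(); // stand-in for actually queueing the forwards
        had_work
    }

    fn push_pending_forwards_ev() { /* enqueue a single PendingHTLCsForwardable */ }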
  
        fn push_pending_forwards_ev(&self) {
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
                actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
 -              channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
 +              channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
        ) -> bool {
                actions_blocking_raa_monitor_updates
 -                      .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
 +                      .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
                || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
                        action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                channel_funding_outpoint,
 +                              channel_id,
                                counterparty_node_id,
                        })
                })
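After the change, `raa_monitor_updates_held` keys its blocker map by `ChannelId` instead of deriving one from the funding outpoint, but the predicate's shape is unchanged: hold the update if explicit blockers are registered for the channel, or if a still-pending event carries a matching release action. A simplified sketch with hypothetical local types:

    use std::collections::BTreeMap;

    type ChannelId = [u8; 32]; // stand-in for lightning's ChannelId
    #[derive(PartialEq)]
    struct ReleaseRaaUpdate { channel_id: ChannelId }

    fn raa_updates_held(
        blockers: &BTreeMap<ChannelId, Vec<()>>,
        pending_event_actions: &[Option<ReleaseRaaUpdate>],
        channel_id: ChannelId,
    ) -> bool {
        // Held if any blocking action is registered for this channel...
        blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
            // ...or an as-yet-unhandled event will release the update once processed.
            || pending_event_actions.iter()
                .any(|action| action.as_ref() == Some(&ReleaseRaaUpdate { channel_id }))
    }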
  
                        if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
                                return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
 -                                      chan.context().get_funding_txo().unwrap(), counterparty_node_id);
 +                                      chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
                        }
                }
                false
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
 -                                                              &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
 +                                                              &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
                                                                *counterparty_node_id)
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
  
                                        peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
                                                msg: try_chan_phase_entry!(self, chan.announcement_signatures(
 -                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height(),
 +                                                      &self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
                                                        msg, &self.default_configuration
                                                ), chan_phase_entry),
                                                // Note that announcement_signatures fails if the channel cannot be announced,
        }
  
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
 -              let htlc_forwards;
                let need_lnd_workaround = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
  
                                                        }
                                                }
                                                let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
 -                                              htlc_forwards = self.handle_channel_resumption(
 +                                              let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
                                                        &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
 -                                                      Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 +                                                      Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 +                                              debug_assert!(htlc_forwards.is_none());
 +                                              debug_assert!(decode_update_add_htlcs.is_none());
                                                if let Some(upd) = channel_update {
                                                        peer_state.pending_msg_events.push(upd);
                                                }
                        }
                };
  
 -              let mut persist = NotifyOption::SkipPersistHandleEvents;
 -              if let Some(forwards) = htlc_forwards {
 -                      self.forward_htlcs(&mut [forwards][..]);
 -                      persist = NotifyOption::DoPersist;
 -              }
 -
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
 -              Ok(persist)
 +              Ok(NotifyOption::SkipPersistHandleEvents)
        }
  
        /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
 -              for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
 +              for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
 -                                              let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
 +                                              let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 -                                                      self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
 +                                                      self.claim_funds_internal(htlc_update.source, preimage,
 +                                                              htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
 +                                                              false, counterparty_node_id, funding_outpoint, channel_id, None);
                                                } else {
                                                        log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
 -                                                      let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
 +                                                      let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
                                                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
 -                                      MonitorEvent::HolderForceClosed(funding_outpoint) => {
 +                                      MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
 -                                                              if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
 +                                                              if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                                              failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
 +                                                                              let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
 +                                                                                      reason
 +                                                                              } else {
 +                                                                                      ClosureReason::HolderForceClosed
 +                                                                              };
 +                                                                              failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                                      let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                                                                      pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
 -                                                                                              msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
 +                                                                                              msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
                                                                                        },
                                                                                });
                                                                        }
                                                        }
                                                }
                                        },
 -                                      MonitorEvent::Completed { funding_txo, monitor_update_id } => {
 -                                              self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
 +                                      MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
 +                                              self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
                                        },
                                }
                        }
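With `HolderForceClosedWithInfo`, the monitor can now report *why* it force-closed, and the handler above prefers that reason over the generic `ClosureReason::HolderForceClosed`. The selection logic, reduced to a sketch over a hypothetical mirror of the enum:

    // Hypothetical mirror of the variants matched above.
    enum MonitorEvent {
        HolderForceClosed([u8; 36]),                  // funding outpoint only
        HolderForceClosedWithInfo { reason: String }, // structured closure reason
        Completed { monitor_update_id: u64 },
    }

    fn pick_closure_reason(ev: &MonitorEvent) -> Option<String> {
        match ev {
            // Prefer the structured reason when the monitor supplies one.
            MonitorEvent::HolderForceClosedWithInfo { reason } => Some(reason.clone()),
            // Fall back to the legacy, reason-less variant.
            MonitorEvent::HolderForceClosed(_) => Some("holder force-closed".to_owned()),
            MonitorEvent::Completed { .. } => None,
        }
    }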
                                                                                // We're done with this channel. We got a closing_signed and sent back
                                                                                // a closing_signed with a closing transaction to broadcast.
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                                      let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                                                                      pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
 -                      if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 +                      if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
                                self.pending_background_events.lock().unwrap().push(
                                        BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 -                                              counterparty_node_id, funding_txo, update
 +                                              counterparty_node_id, funding_txo, update, channel_id,
                                        });
                        }
                        self.finish_close_channel(failure);
                }
        }
 +}
  
 +macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
        /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
        /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
        /// not have an expiration unless otherwise set on the builder.
        /// [`Offer`]: crate::offers::offer::Offer
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        pub fn create_offer_builder(
 -              &self, description: String
 -      ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
 -              let node_id = self.get_our_node_id();
 -              let expanded_key = &self.inbound_payment_key;
 -              let entropy = &*self.entropy_source;
 -              let secp_ctx = &self.secp_ctx;
 -
 -              let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 +              &$self, description: String
 +      ) -> Result<$builder, Bolt12SemanticError> {
 +              let node_id = $self.get_our_node_id();
 +              let expanded_key = &$self.inbound_payment_key;
 +              let entropy = &*$self.entropy_source;
 +              let secp_ctx = &$self.secp_ctx;
 +
 +              let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = OfferBuilder::deriving_signing_pubkey(
                        description, node_id, expanded_key, entropy, secp_ctx
                )
 -                      .chain_hash(self.chain_hash)
 +                      .chain_hash($self.chain_hash)
                        .path(path);
  
 -              Ok(builder)
 +              Ok(builder.into())
        }
 +} }
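Usage is unchanged by the macro-ization for non-`c_bindings` builds; a hedged sketch, assuming a constructed `channel_manager` and a caller returning `Result<_, Bolt12SemanticError>`:

    // Sketch: create and display an offer.
    let offer = channel_manager
        .create_offer_builder("coffee".to_string())?
        .amount_msats(10_000_000)
        .build()?;
    println!("{}", offer); // Offer implements Display (the lno... encoding)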
  
 +macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
        /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
        /// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
        ///
        /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
        /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
        pub fn create_refund_builder(
 -              &self, description: String, amount_msats: u64, absolute_expiry: Duration,
 +              &$self, description: String, amount_msats: u64, absolute_expiry: Duration,
                payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
 -      ) -> Result<RefundBuilder<secp256k1::All>, Bolt12SemanticError> {
 -              let node_id = self.get_our_node_id();
 -              let expanded_key = &self.inbound_payment_key;
 -              let entropy = &*self.entropy_source;
 -              let secp_ctx = &self.secp_ctx;
 +      ) -> Result<$builder, Bolt12SemanticError> {
 +              let node_id = $self.get_our_node_id();
 +              let expanded_key = &$self.inbound_payment_key;
 +              let entropy = &*$self.entropy_source;
 +              let secp_ctx = &$self.secp_ctx;
  
 -              let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 +              let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = RefundBuilder::deriving_payer_id(
                        description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
                )?
 -                      .chain_hash(self.chain_hash)
 +                      .chain_hash($self.chain_hash)
                        .absolute_expiry(absolute_expiry)
                        .path(path);
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
 +
                let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
 -              self.pending_outbound_payments
 +              $self.pending_outbound_payments
                        .add_new_awaiting_invoice(
                                payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
                        )
                        .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
  
 -              Ok(builder)
 +              Ok(builder.into())
        }
 +} }
 +
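Similarly for refunds, where the builder path now also takes the persistence guard before registering the awaiting-invoice payment; a hedged usage sketch (parameter values illustrative, not prescriptive):

    // Sketch: build a refund for 1_000_000 msat, retrying payment up to 3 times.
    let payment_id = PaymentId([42; 32]);
    let refund = channel_manager
        .create_refund_builder(
            "refund for order #1".to_string(),
            1_000_000,
            absolute_expiry, // Duration since the Unix epoch, chosen by the caller
            payment_id,
            Retry::Attempts(3),
            None,            // no cap on total routing fees
        )?
        .build()?;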
 +impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
 +where
 +      M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
 +      T::Target: BroadcasterInterface,
 +      ES::Target: EntropySource,
 +      NS::Target: NodeSigner,
 +      SP::Target: SignerProvider,
 +      F::Target: FeeEstimator,
 +      R::Target: Router,
 +      L::Target: Logger,
 +{
 +      #[cfg(not(c_bindings))]
 +      create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
 +      #[cfg(not(c_bindings))]
 +      create_refund_builder!(self, RefundBuilder<secp256k1::All>);
 +
 +      #[cfg(c_bindings)]
 +      create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
 +      #[cfg(c_bindings)]
 +      create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
  
        /// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
        /// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
        /// Errors if:
        /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
        /// - the provided parameters are invalid for the offer,
 +      /// - the offer is for an unsupported chain, or
        /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
        ///   request.
        ///
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
  
 -              let builder = offer
 +              let builder: InvoiceRequestBuilder<DerivedPayerId, secp256k1::All> = offer
                        .request_invoice_deriving_payer_id(expanded_key, entropy, secp_ctx, payment_id)?
 -                      .chain_hash(self.chain_hash)?;
 +                      .into();
 +              let builder = builder.chain_hash(self.chain_hash)?;
 +
                let builder = match quantity {
                        None => builder,
                        Some(quantity) => builder.quantity(quantity)?,
                let invoice_request = builder.build_and_sign()?;
                let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                let expiration = StaleExpiration::TimerTicks(1);
                self.pending_outbound_payments
                        .add_new_awaiting_invoice(
        ///
        /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
        /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
 -      /// [`PaymentPreimage`].
 +      /// [`PaymentPreimage`]. It is returned purely for informational purposes.
        ///
        /// # Limitations
        ///
        ///
        /// # Errors
        ///
 -      /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
 -      /// path for the invoice.
 +      /// Errors if:
 +      /// - the refund is for an unsupported chain, or
 +      /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
 +      ///   the invoice.
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
 -      pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
 +      pub fn request_refund_payment(
 +              &self, refund: &Refund
 +      ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
                let amount_msats = refund.amount_msats();
                let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
  
 +              if refund.chain() != self.chain_hash {
 +                      return Err(Bolt12SemanticError::UnsupportedChain);
 +              }
 +
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
                        Ok((payment_hash, payment_secret)) => {
 -                              let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
 +                              let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
 +                              let payment_paths = self.create_blinded_payment_paths(
 +                                      amount_msats, payment_secret, payment_context
 +                              )
                                        .map_err(|_| Bolt12SemanticError::MissingPaths)?;
  
 -                              #[cfg(not(feature = "no-std"))]
 +                              #[cfg(feature = "std")]
                                let builder = refund.respond_using_derived_keys(
                                        payment_paths, payment_hash, expanded_key, entropy
                                )?;
 -                              #[cfg(feature = "no-std")]
 +                              #[cfg(not(feature = "std"))]
                                let created_at = Duration::from_secs(
                                        self.highest_seen_timestamp.load(Ordering::Acquire) as u64
                                );
 -                              #[cfg(feature = "no-std")]
 +                              #[cfg(not(feature = "std"))]
                                let builder = refund.respond_using_derived_keys_no_std(
                                        payment_paths, payment_hash, created_at, expanded_key, entropy
                                )?;
 +                              let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
                                let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
                                let reply_path = self.create_blinded_path()
                                        .map_err(|_| Bolt12SemanticError::MissingPaths)?;
                                let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
                                if refund.paths().is_empty() {
                                        let message = new_pending_onion_message(
 -                                              OffersMessage::Invoice(invoice),
 +                                              OffersMessage::Invoice(invoice.clone()),
                                                Destination::Node(refund.payer_id()),
                                                Some(reply_path),
                                        );
                                        }
                                }
  
 -                              Ok(())
 +                              Ok(invoice)
                        },
                        Err(()) => Err(Bolt12SemanticError::InvalidAmount),
                }
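Since `request_refund_payment` now hands back the `Bolt12Invoice` it enqueues, callers can record what they expect to be paid; a hedged sketch:

    // Sketch: respond to a received refund and keep the invoice for bookkeeping.
    let invoice = channel_manager.request_refund_payment(&refund)?;
    println!("expecting {} msat for payment hash {}",
        invoice.amount_msats(), invoice.payment_hash());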
        /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
        /// [`PaymentHash`] and [`PaymentPreimage`] for you.
        ///
 -      /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
 -      /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
 -      /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
 -      /// passed directly to [`claim_funds`].
 +      /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`]
 +      /// event, whose [`PaymentClaimable::purpose`] will return `Some` from
 +      /// [`PaymentPurpose::preimage`]. That preimage should then be passed directly to
 +      /// [`claim_funds`].
        ///
        /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
        ///
        /// [`claim_funds`]: Self::claim_funds
        /// [`PaymentClaimable`]: events::Event::PaymentClaimable
        /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
 -      /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
 -      /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
 +      /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
                min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
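Tying the updated docs together: the preimage comes back via [`PaymentPurpose::preimage`] on the claimable event. A hedged end-to-end sketch, with the event plumbing elided:

    // Sketch: accept 10 sats, expiring in an hour, with the default CLTV delta.
    let (payment_hash, payment_secret) =
        channel_manager.create_inbound_payment(Some(10_000), 3_600, None)?;
    // ...hand payment_hash/payment_secret to the payer; later, in the event handler:
    // Event::PaymentClaimable { purpose, .. } => {
    //     if let Some(preimage) = purpose.preimage() {
    //         channel_manager.claim_funds(preimage);
    //     }
    // }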
        /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
        fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
                let recipient = self.get_our_node_id();
 -              let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
  
                let peers = self.per_peer_state.read().unwrap()
                        .collect::<Vec<_>>();
  
                self.router
 -                      .create_blinded_paths(recipient, peers, entropy_source, secp_ctx)
 +                      .create_blinded_paths(recipient, peers, secp_ctx)
                        .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
  
        /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
        /// [`Router::create_blinded_payment_paths`].
        fn create_blinded_payment_paths(
 -              &self, amount_msats: u64, payment_secret: PaymentSecret
 +              &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
        ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
 -              let entropy_source = self.entropy_source.deref();
                let secp_ctx = &self.secp_ctx;
  
                let first_hops = self.list_usable_channels();
                let payee_node_id = self.get_our_node_id();
 -              let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
 +              let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
                        + LATENCY_GRACE_PERIOD_BLOCKS;
                let payee_tlvs = ReceiveTlvs {
                        payment_secret,
                                max_cltv_expiry,
                                htlc_minimum_msat: 1,
                        },
 +                      payment_context,
                };
                self.router.create_blinded_payment_paths(
 -                      payee_node_id, first_hops, payee_tlvs, amount_msats, entropy_source, secp_ctx
 +                      payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
                )
        }
  
        ///
        /// [phantom node payments]: crate::sign::PhantomKeysManager
        pub fn get_phantom_scid(&self) -> u64 {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
                        let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
        /// Note that this method is not guaranteed to return unique values, you may need to call it a few
        /// times to get a unique scid.
        pub fn get_intercept_scid(&self) -> u64 {
 -              let best_block_height = self.best_block.read().unwrap().height();
 +              let best_block_height = self.best_block.read().unwrap().height;
                let short_to_chan_info = self.short_to_chan_info.read().unwrap();
                loop {
                        let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
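Given the non-uniqueness caveat in the docs above, an application tracking the scids it has handed out might simply retry; a sketch with a hypothetical `issued` set (`ChannelManager`'s type parameters are dodged via a small trait):

    use std::collections::HashSet;

    trait GetInterceptScid { fn get_intercept_scid(&self) -> u64; }

    // Hypothetical helper: loop until get_intercept_scid returns a value we have
    // not already issued. get_intercept_scid is the real API; the set is ours.
    fn fresh_intercept_scid(cm: &impl GetInterceptScid, issued: &mut HashSet<u64>) -> u64 {
        loop {
            let scid = cm.get_intercept_scid();
            if issued.insert(scid) {
                return scid;
            }
        }
    }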
        /// [`Event`] being handled) completes, this should be called to restore the channel to normal
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
 -      fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 +      fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
 +              channel_funding_outpoint: OutPoint, channel_id: ChannelId,
 +              mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 +
                let logger = WithContext::from(
 -                      &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
 +                      &self.logger, Some(counterparty_node_id), Some(channel_id),
                );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                                if let Some(blocker) = completed_blocker.take() {
                                        // Only do this on the first iteration of the loop.
                                        if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
 -                                              .get_mut(&channel_funding_outpoint.to_channel_id())
 +                                              .get_mut(&channel_id)
                                        {
                                                blockers.retain(|iter| iter != &blocker);
                                        }
                                }
  
                                if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
 -                                      channel_funding_outpoint, counterparty_node_id) {
 +                                      channel_funding_outpoint, channel_id, counterparty_node_id) {
                                        // Check that, while holding the peer lock, we don't have anything else
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
                                        log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
 -                                              &channel_funding_outpoint.to_channel_id());
 +                                              &channel_id);
                                        break;
                                }
  
 -                              if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
 +                              if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
 +                                      channel_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
 -                                                              channel_funding_outpoint.to_channel_id());
 +                                                              channel_id);
                                                        handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
                                                        }
                                                } else {
                                                        log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
 -                                                              channel_funding_outpoint.to_channel_id());
 +                                                              channel_id);
                                                }
                                        }
                                }
                for action in actions {
                        match action {
                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 -                                      channel_funding_outpoint, counterparty_node_id
 +                                      channel_funding_outpoint, channel_id, counterparty_node_id
                                } => {
 -                                      self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
 +                                      self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
                                }
                        }
                }
@@@ -9226,7 -8175,7 +9226,7 @@@ wher
        /// will randomly be placed first or last in the returned array.
        ///
        /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
 -      /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
 +      /// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
        /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
                                result = NotifyOption::DoPersist;
                        }
  
 +                      let mut is_any_peer_connected = false;
                        let mut pending_events = Vec::new();
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
                                if peer_state.pending_msg_events.len() > 0 {
                                        pending_events.append(&mut peer_state.pending_msg_events);
                                }
 +                              if peer_state.is_connected {
 +                                      is_any_peer_connected = true;
 +                              }
 +                      }
 +
 +                      // Ensure that we are connected to some peers before getting broadcast messages.
 +                      if is_any_peer_connected {
 +                              let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
 +                              pending_events.append(&mut broadcast_msgs);
                        }
  
                        if !pending_events.is_empty() {
@@@ -9311,9 -8250,9 +9311,9 @@@ wher
        fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
                {
                        let best_block = self.best_block.read().unwrap();
 -                      assert_eq!(best_block.block_hash(), header.prev_blockhash,
 +                      assert_eq!(best_block.block_hash, header.prev_blockhash,
                                "Blocks must be connected in chain-order - the connected header must build on the last connected header");
 -                      assert_eq!(best_block.height(), height - 1,
 +                      assert_eq!(best_block.height, height - 1,
                                "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
                }
  
                let new_height = height - 1;
                {
                        let mut best_block = self.best_block.write().unwrap();
 -                      assert_eq!(best_block.block_hash(), header.block_hash(),
 +                      assert_eq!(best_block.block_hash, header.block_hash(),
                                "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
 -                      assert_eq!(best_block.height(), height,
 +                      assert_eq!(best_block.height, height,
                                "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
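The `.block_hash()`/`.height()` accessor calls become direct field reads here because `BestBlock` now exposes `block_hash` and `height` as public fields; a tiny hedged sketch:

    use bitcoin::Network;
    use lightning::chain::BestBlock;

    // BestBlock's block_hash and height are read directly now.
    let best = BestBlock::from_network(Network::Bitcoin);
    assert_eq!(best.height, 0); // from_network starts at the genesis block
    let _hash = best.block_hash;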
@@@ -9364,7 -8303,7 +9364,7 @@@ wher
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
                        .map(|(a, b)| (a, Vec::new(), b)));
  
 -              let last_best_block_height = self.best_block.read().unwrap().height();
 +              let last_best_block_height = self.best_block.read().unwrap().height;
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
                        self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
@@@ -9468,14 -8407,10 +9468,14 @@@ wher
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 +
                                peer_state.channel_by_id.retain(|_, phase| {
                                        match phase {
                                                // Retain unfunded channels.
                                                ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
 +                                              // TODO(dual_funding): Combine this match arm with above.
 +                                              #[cfg(any(dual_funding, splicing))]
 +                                              ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
                                                ChannelPhase::Funded(channel) => {
                                                        let res = f(channel);
                                                        if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                                                let reason_message = format!("{}", reason);
                                                                failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
 -                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                      let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 +                                                                      pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
                                                incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
                                                phantom_shared_secret: None,
                                                outpoint: htlc.prev_funding_outpoint,
 +                                              channel_id: htlc.prev_channel_id,
                                                blinded_failure: htlc.forward_info.routing.blinded_failure(),
                                        });
  
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
                                        let logger = WithContext::from(
 -                                              &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
 +                                              &self.logger, None, Some(htlc.prev_channel_id)
                                        );
                                        log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
        }
  
        /// Returns true if this [`ChannelManager`] needs to be persisted.
 +      ///
 +      /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
 +      /// indicates this should be checked.
        pub fn get_and_clear_needs_persistence(&self) -> bool {
                self.needs_persist_flag.swap(false, Ordering::AcqRel)
        }
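
For context, a background persister would typically pair this flag with the future mentioned above; a minimal sketch, assuming a `persister` object with a `persist_manager` method (both hypothetical, not part of this patch):

    // Hedged sketch of a persistence loop; `persister` and
    // `persist_manager` are assumed names, not LDK APIs.
    loop {
        // Wakes when an event is pending or persistence is needed.
        channel_manager.get_event_or_persistence_needed_future().await;
        channel_manager.process_pending_events(&event_handler);
        if channel_manager.get_and_clear_needs_persistence() {
            persister.persist_manager(&channel_manager).expect("persist failed");
        }
    }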
@@@ -9729,7 -8659,7 +9729,7 @@@ wher
        fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
 -                       msg.temporary_channel_id.clone())), *counterparty_node_id);
 +                       msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
  
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
        fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
 -                       msg.temporary_channel_id.clone())), *counterparty_node_id);
 +                       msg.common_fields.temporary_channel_id.clone())), *counterparty_node_id);
        }
  
        fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
                         msg.channel_id.clone())), *counterparty_node_id);
        }
  
 +      #[cfg(splicing)]
        fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported".to_owned(),
                         msg.channel_id.clone())), *counterparty_node_id);
        }
  
 +      #[cfg(splicing)]
        fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported (splice_ack)".to_owned(),
                         msg.channel_id.clone())), *counterparty_node_id);
        }
  
 +      #[cfg(splicing)]
        fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported (splice_locked)".to_owned(),
                                                        }
                                                        &mut chan.context
                                                },
 -                                              // Unfunded channels will always be removed.
 -                                              ChannelPhase::UnfundedOutboundV1(chan) => {
 -                                                      &mut chan.context
 +                                              // We retain UnfundedOutboundV1 channels for some time in case the
 +                                              // peer unexpectedly disconnects and intends to reconnect.
 +                                              ChannelPhase::UnfundedOutboundV1(_) => {
 +                                                      return true;
                                                },
 +                                              // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
 +                                              #[cfg(any(dual_funding, splicing))]
 +                                              ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                      &mut chan.context
 +                                              },
 +                                              #[cfg(any(dual_funding, splicing))]
 +                                              ChannelPhase::UnfundedInboundV2(chan) => {
 +                                                      &mut chan.context
 +                                              },
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
                                                // Gossip
                                                &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
                                                &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
 -                                              &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 +                                              // [`ChannelManager::pending_broadcast_messages`] now holds any [`BroadcastChannelUpdate`]s;
 +                                              // this arm exists only to keep the match exhaustive.
 +                                              &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
 +                                                      debug_assert!(false, "This event shouldn't have been here");
 +                                                      false
 +                                              },
                                                &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                                &events::MessageSendEvent::SendChannelUpdate { .. } => false,
                                                &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                                        return NotifyOption::SkipPersistNoEvents;
                                                }
                                                e.insert(Mutex::new(PeerState {
 -                                                      channel_by_id: HashMap::new(),
 -                                                      inbound_channel_request_by_id: HashMap::new(),
 +                                                      channel_by_id: new_hash_map(),
 +                                                      inbound_channel_request_by_id: new_hash_map(),
                                                        latest_features: init_msg.features.clone(),
                                                        pending_msg_events: Vec::new(),
                                                        in_flight_monitor_updates: BTreeMap::new(),
                                                let mut peer_state = e.get().lock().unwrap();
                                                peer_state.latest_features = init_msg.features.clone();
  
 -                                              let best_block_height = self.best_block.read().unwrap().height();
 +                                              let best_block_height = self.best_block.read().unwrap().height;
                                                if inbound_peer_limited &&
                                                        Self::unfunded_channel_count(&*peer_state, best_block_height) ==
                                                        peer_state.channel_by_id.len()
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
  
 -                              peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
 -                                      if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
 -                              ).for_each(|chan| {
 -                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 -                                      pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 -                                              node_id: chan.context.get_counterparty_node_id(),
 -                                              msg: chan.get_channel_reestablish(&&logger),
 -                                      });
 -                              });
 +                              for (_, phase) in peer_state.channel_by_id.iter_mut() {
 +                                      match phase {
 +                                              ChannelPhase::Funded(chan) => {
 +                                                      let logger = WithChannelContext::from(&self.logger, &chan.context);
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_channel_reestablish(&&logger),
 +                                                      });
 +                                              }
 +
 +                                              ChannelPhase::UnfundedOutboundV1(chan) => {
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_open_channel(self.chain_hash),
 +                                                      });
 +                                              }
 +
 +                                              // TODO(dual_funding): Combine this match arm with the one above once #[cfg(any(dual_funding, splicing))] is removed.
 +                                              #[cfg(any(dual_funding, splicing))]
 +                                              ChannelPhase::UnfundedOutboundV2(chan) => {
 +                                                      pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
 +                                                              node_id: chan.context.get_counterparty_node_id(),
 +                                                              msg: chan.get_open_channel_v2(self.chain_hash),
 +                                                      });
 +                                              },
 +
 +                                              ChannelPhase::UnfundedInboundV1(_) => {
 +                                                      // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
 +                                                      // they are not persisted and won't be recovered after a crash.
 +                                                      // Therefore, they shouldn't exist at this point.
 +                                                      debug_assert!(false);
 +                                              }
 +
 +                                              // TODO(dual_funding): Combine this match arm with the one above once #[cfg(any(dual_funding, splicing))] is removed.
 +                                              #[cfg(any(dual_funding, splicing))]
 +                                              ChannelPhase::UnfundedInboundV2(_) => {
 +                                                      // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
 +                                                      // they are not persisted and won't be recovered after a crash.
 +                                                      // Therefore, they shouldn't exist at this point.
 +                                                      debug_assert!(false);
 +                                              },
 +                                      }
 +                              }
                        }
  
                        return NotifyOption::SkipPersistHandleEvents;
        }
  
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
 -              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 -
                match &msg.data as &str {
                        "cannot co-op close channel w/ active htlcs"|
                        "link failed to shutdown" =>
                                // We're not going to bother handling this in a sensible way, instead simply
                                // repeating the Shutdown message on repeat until morale improves.
                                if !msg.channel_id.is_zero() {
 -                                      let per_peer_state = self.per_peer_state.read().unwrap();
 -                                      let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 -                                      if peer_state_mutex_opt.is_none() { return; }
 -                                      let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
 -                                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
 -                                              if let Some(msg) = chan.get_outbound_shutdown() {
 -                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 -                                                              node_id: *counterparty_node_id,
 -                                                              msg,
 -                                                      });
 -                                              }
 -                                              peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 -                                                      node_id: *counterparty_node_id,
 -                                                      action: msgs::ErrorAction::SendWarningMessage {
 -                                                              msg: msgs::WarningMessage {
 -                                                                      channel_id: msg.channel_id,
 -                                                                      data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
 -                                                              },
 -                                                              log_level: Level::Trace,
 +                                      PersistenceNotifierGuard::optionally_notify(
 +                                              self,
 +                                              || -> NotifyOption {
 +                                                      let per_peer_state = self.per_peer_state.read().unwrap();
 +                                                      let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 +                                                      if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
 +                                                      let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
 +                                                      if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
 +                                                              if let Some(msg) = chan.get_outbound_shutdown() {
 +                                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                                                              node_id: *counterparty_node_id,
 +                                                                              msg,
 +                                                                      });
 +                                                              }
 +                                                              peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                                      node_id: *counterparty_node_id,
 +                                                                      action: msgs::ErrorAction::SendWarningMessage {
 +                                                                              msg: msgs::WarningMessage {
 +                                                                                      channel_id: msg.channel_id,
 +                                                                                      data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
 +                                                                              },
 +                                                                              log_level: Level::Trace,
 +                                                                      }
 +                                                              });
 +                                                              // This can happen in a fairly tight loop, so we absolutely cannot trigger
 +                                                              // a `ChannelManager` write here.
 +                                                              return NotifyOption::SkipPersistHandleEvents;
                                                        }
 -                                              });
 -                                      }
 +                                                      NotifyOption::SkipPersistNoEvents
 +                                              }
 +                                      );
                                }
                                return;
                        }
                        _ => {}
                }
  
 +              let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 +
                if msg.channel_id.is_zero() {
                        let channel_ids: Vec<ChannelId> = {
                                let per_peer_state = self.per_peer_state.read().unwrap();
                                if peer_state_mutex_opt.is_none() { return; }
                                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
 -                              if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
 -                                      if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 -                                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 -                                                      node_id: *counterparty_node_id,
 -                                                      msg,
 -                                              });
 -                                              return;
 -                                      }
 +                              match peer_state.channel_by_id.get_mut(&msg.channel_id) {
 +                                      Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
 +                                              if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
 +                                                              node_id: *counterparty_node_id,
 +                                                              msg,
 +                                                      });
 +                                                      return;
 +                                              }
 +                                      },
 +                                      #[cfg(any(dual_funding, splicing))]
 +                                      Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
 +                                              if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
 +                                                              node_id: *counterparty_node_id,
 +                                                              msg,
 +                                                      });
 +                                                      return;
 +                                              }
 +                                      },
 +                                      None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
 +                                      #[cfg(any(dual_funding, splicing))]
 +                                      Some(ChannelPhase::UnfundedInboundV2(_)) => (),
                                }
                        }
  
                                        },
                                };
  
 +                              let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
 +                                      offer_id: invoice_request.offer_id,
 +                                      invoice_request: invoice_request.fields(),
 +                              });
                                let payment_paths = match self.create_blinded_payment_paths(
 -                                      amount_msats, payment_secret
 +                                      amount_msats, payment_secret, payment_context
                                ) {
                                        Ok(payment_paths) => payment_paths,
                                        Err(()) => {
                                        },
                                };
  
 -                              #[cfg(feature = "no-std")]
 +                              #[cfg(not(feature = "std"))]
                                let created_at = Duration::from_secs(
                                        self.highest_seen_timestamp.load(Ordering::Acquire) as u64
                                );
  
 -                              if invoice_request.keys.is_some() {
 -                                      #[cfg(not(feature = "no-std"))]
 +                              let response = if invoice_request.keys.is_some() {
 +                                      #[cfg(feature = "std")]
                                        let builder = invoice_request.respond_using_derived_keys(
                                                payment_paths, payment_hash
                                        );
 -                                      #[cfg(feature = "no-std")]
 +                                      #[cfg(not(feature = "std"))]
                                        let builder = invoice_request.respond_using_derived_keys_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
 -                                      match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
 -                                              Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
 -                                              Err(error) => Some(OffersMessage::InvoiceError(error.into())),
 -                                      }
 +                                      builder
 +                                              .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
 +                                              .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
 +                                              .map_err(InvoiceError::from)
                                } else {
 -                                      #[cfg(not(feature = "no-std"))]
 +                                      #[cfg(feature = "std")]
                                        let builder = invoice_request.respond_with(payment_paths, payment_hash);
 -                                      #[cfg(feature = "no-std")]
 +                                      #[cfg(not(feature = "std"))]
                                        let builder = invoice_request.respond_with_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
 -                                      let response = builder.and_then(|builder| builder.allow_mpp().build())
 -                                              .map_err(|e| OffersMessage::InvoiceError(e.into()))
 -                                              .and_then(|invoice|
 -                                                      match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
 -                                                              Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
 -                                                              Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
 -                                                                              InvoiceError::from_string("Failed signing invoice".to_string())
 -                                                              )),
 -                                                              Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
 -                                                                              InvoiceError::from_string("Failed invoice signature verification".to_string())
 -                                                              )),
 -                                                      });
 -                                      match response {
 -                                              Ok(invoice) => Some(invoice),
 -                                              Err(error) => Some(error),
 -                                      }
 +                                      builder
 +                                              .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
 +                                              .and_then(|builder| builder.allow_mpp().build())
 +                                              .map_err(InvoiceError::from)
 +                                              .and_then(|invoice| {
 +                                                      #[cfg(c_bindings)]
 +                                                      let mut invoice = invoice;
 +                                                      invoice
 +                                                              .sign(|invoice: &UnsignedBolt12Invoice|
 +                                                                      self.node_signer.sign_bolt12_invoice(invoice)
 +                                                              )
 +                                                              .map_err(InvoiceError::from)
 +                                              })
 +                              };
 +
 +                              match response {
 +                                      Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
 +                                      Err(error) => Some(OffersMessage::InvoiceError(error.into())),
                                }
                        },
                        OffersMessage::Invoice(invoice) => {
 -                              match invoice.verify(expanded_key, secp_ctx) {
 -                                      Err(()) => {
 -                                              Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
 -                                      },
 -                                      Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
 -                                              Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
 -                                      },
 -                                      Ok(payment_id) => {
 -                                              if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
 -                                                      log_trace!(self.logger, "Failed paying invoice: {:?}", e);
 -                                                      Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
 +                              let response = invoice
 +                                      .verify(expanded_key, secp_ctx)
 +                                      .map_err(|()| InvoiceError::from_string("Unrecognized invoice".to_owned()))
 +                                      .and_then(|payment_id| {
 +                                              let features = self.bolt12_invoice_features();
 +                                              if invoice.invoice_features().requires_unknown_bits_from(&features) {
 +                                                      Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
                                                } else {
 -                                                      None
 +                                                      self.send_payment_for_bolt12_invoice(&invoice, payment_id)
 +                                                              .map_err(|e| {
 +                                                                      log_trace!(self.logger, "Failed paying invoice: {:?}", e);
 +                                                                      InvoiceError::from_string(format!("{:?}", e))
 +                                                              })
                                                }
 -                                      },
 +                                      });
 +
 +                              match response {
 +                                      Ok(()) => None,
 +                                      Err(e) => Some(OffersMessage::InvoiceError(e)),
                                }
                        },
                        OffersMessage::InvoiceError(invoice_error) => {
        }
  }
  
 +impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
 +NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
 +where
 +      M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
 +      T::Target: BroadcasterInterface,
 +      ES::Target: EntropySource,
 +      NS::Target: NodeSigner,
 +      SP::Target: SignerProvider,
 +      F::Target: FeeEstimator,
 +      R::Target: Router,
 +      L::Target: Logger,
 +{
 +      fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
 +              self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
 +      }
 +}
 +
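The new `NodeIdLookUp` impl simply consults the SCID-to-channel map; a usage sketch (the `scid` value is an assumed example and error handling is elided):

    // Resolve the counterparty node id behind a short channel id known
    // to this ChannelManager, e.g. when constructing an onion message
    // reply path toward that peer.
    let scid: u64 = 0x0a_1234_0001; // hypothetical SCID
    if let Some(node_id) = channel_manager.next_node_id(scid) {
        println!("next hop node: {}", node_id);
    }
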
  /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
  /// [`ChannelManager`].
  pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
@@@ -10592,8 -9419,6 +10592,8 @@@ impl Writeable for ChannelDetails 
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
                        (41, self.channel_shutdown_state, option),
 +                      (43, self.pending_inbound_htlcs, optional_vec),
 +                      (45, self.pending_outbound_htlcs, optional_vec),
                });
                Ok(())
        }
@@@ -10632,8 -9457,6 +10632,8 @@@ impl Readable for ChannelDetails 
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
                        (41, channel_shutdown_state, option),
 +                      (43, pending_inbound_htlcs, optional_vec),
 +                      (45, pending_outbound_htlcs, optional_vec),
                });
  
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
                        channel_shutdown_state,
 +                      pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
 +                      pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
                })
        }
  }
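
Callers can then inspect the two new `ChannelDetails` fields; a hedged sketch (the `amount_msat` field on the inbound HTLC details type is an assumption here):

    // The lists are empty when the manager was serialized by a version
    // that did not write TLVs 43/45.
    for details in channel_manager.list_channels() {
        for htlc in &details.pending_inbound_htlcs {
            println!("pending inbound HTLC: {} msat", htlc.amount_msat);
        }
    }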
@@@ -10700,11 -9521,9 +10700,11 @@@ impl_writeable_tlv_based_enum!(PendingH
                (3, payment_metadata, option),
                (5, custom_tlvs, optional_vec),
                (7, requires_blinded_error, (default_value, false)),
 +              (9, payment_context, option),
        },
        (2, ReceiveKeysend) => {
                (0, payment_preimage, required),
 +              (1, requires_blinded_error, (default_value, false)),
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
                (4, payment_data, option), // Added in 0.0.116
@@@ -10808,18 -9627,13 +10808,18 @@@ impl_writeable_tlv_based!(HTLCPreviousH
        (4, htlc_id, required),
        (6, incoming_packet_shared_secret, required),
        (7, user_channel_id, option),
 +      // Note that by the time we get past the required read for type 2 above, outpoint will be
 +      // filled in, so we can safely unwrap it here.
 +      (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
  });
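
The `default_value` above backfills `channel_id` for data written before the field existed. For reference, a v1 channel id is the big-endian funding txid with the 16-bit output index XORed into its last two bytes; a standalone sketch of that computation (not the actual `ChannelId` code):

    // BOLT 2 v1 channel_id derivation, sketched over raw byte arrays.
    fn v1_channel_id(funding_txid_be: [u8; 32], output_index: u16) -> [u8; 32] {
        let mut id = funding_txid_be;
        id[30] ^= (output_index >> 8) as u8;
        id[31] ^= (output_index & 0xff) as u8;
        id
    }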
  
  impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 -              let (payment_data, keysend_preimage) = match &self.onion_payload {
 -                      OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
 -                      OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
 +              let (payment_data, keysend_preimage, payment_context) = match &self.onion_payload {
 +                      OnionPayload::Invoice { _legacy_hop_data, payment_context } => {
 +                              (_legacy_hop_data.as_ref(), None, payment_context.as_ref())
 +                      },
 +                      OnionPayload::Spontaneous(preimage) => (None, Some(preimage), None),
                };
                write_tlv_fields!(writer, {
                        (0, self.prev_hop, required),
                        (6, self.cltv_expiry, required),
                        (8, keysend_preimage, option),
                        (10, self.counterparty_skimmed_fee_msat, option),
 +                      (11, payment_context, option),
                });
                Ok(())
        }
@@@ -10849,7 -9662,6 +10849,7 @@@ impl Readable for ClaimableHTLC 
                        (6, cltv_expiry, required),
                        (8, keysend_preimage, option),
                        (10, counterparty_skimmed_fee_msat, option),
 +                      (11, payment_context, option),
                });
                let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
                let value = value_ser.0.unwrap();
                                        }
                                        total_msat = Some(payment_data.as_ref().unwrap().total_msat);
                                }
 -                              OnionPayload::Invoice { _legacy_hop_data: payment_data }
 +                              OnionPayload::Invoice { _legacy_hop_data: payment_data, payment_context }
                        },
                };
                Ok(Self {
@@@ -10966,9 -9778,6 +10966,9 @@@ impl_writeable_tlv_based!(PendingAddHTL
        (2, prev_short_channel_id, required),
        (4, prev_htlc_id, required),
        (6, prev_funding_outpoint, required),
 +      // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
 +      // filled in, so we can safely unwrap it here.
 +      (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
  });
  
  impl Writeable for HTLCForwardInfo {
                self.chain_hash.write(writer)?;
                {
                        let best_block = self.best_block.read().unwrap();
 -                      best_block.height().write(writer)?;
 -                      best_block.block_hash().write(writer)?;
 +                      best_block.height.write(writer)?;
 +                      best_block.block_hash.write(writer)?;
                }
  
 +              let per_peer_state = self.per_peer_state.write().unwrap();
 +
                let mut serializable_peer_count: u64 = 0;
                {
 -                      let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut number_of_funded_channels = 0;
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        }
                }
  
 -              let per_peer_state = self.per_peer_state.write().unwrap();
 +              let mut decode_update_add_htlcs_opt = None;
 +              let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
 +              if !decode_update_add_htlcs.is_empty() {
 +                      decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
 +              }
  
                let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
                let claimable_payments = self.claimable_payments.lock().unwrap();
                }
  
                // Encode without retry info for 0.0.101 compatibility.
 -              let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
 +              let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
                for (id, outbound) in pending_outbound_payments.iter() {
                        match outbound {
                                PendingOutboundPayment::Legacy { session_privs } |
                for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
                        for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
                                if !updates.is_empty() {
 -                                      if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
 +                                      if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
                                        in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
                                }
                        }
                        (10, in_flight_monitor_updates, option),
                        (11, self.probing_cookie_secret, required),
                        (13, htlc_onion_fields, optional_vec),
 +                      (14, decode_update_add_htlcs_opt, option),
                });
  
                Ok(())
@@@ -11435,9 -10238,7 +11435,9 @@@ wher
                        mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
                Self {
                        entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
 -                      channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
 +                      channel_monitors: hash_map_from_iter(
 +                              channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
 +                      ),
                }
        }
  }
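
At restart, the read args are built from the persisted monitors and handed to `read`; an illustrative construction (all bindings are assumed to exist with compatible types):

    // Hedged sketch of wiring ChannelManagerReadArgs at startup.
    let mut monitor_refs: Vec<&mut ChannelMonitor<_>> =
        persisted_monitors.iter_mut().collect();
    let read_args = ChannelManagerReadArgs::new(
        entropy_source, node_signer, signer_provider, fee_estimator,
        chain_monitor, tx_broadcaster, router, logger,
        UserConfig::default(), monitor_refs,
    );
    let (best_block_hash, channel_manager) =
        <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)
            .expect("failed to read ChannelManager");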
                let mut failed_htlcs = Vec::new();
  
                let channel_count: u64 = Readable::read(reader)?;
 -              let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut outpoint_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 -              let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
 +              let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
                let mut close_background_events = Vec::new();
 +              let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
                for _ in 0..channel_count {
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
                        let logger = WithChannelContext::from(&args.logger, &channel.context);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 +                      funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
 -                                      if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 +                                      if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 -                                                      counterparty_node_id, funding_txo, update
 +                                                      counterparty_node_id, funding_txo, channel_id, update
                                                });
                                        }
                                        failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
                                                }
                                        }
                                } else {
-                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+                                       channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
+                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
                                                &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
-                                               monitor.get_latest_update_id());
+                                               monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
 -                                                      let mut by_id_map = HashMap::new();
 +                                                      let mut by_id_map = new_hash_map();
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                        entry.insert(by_id_map);
                                                }
                for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
                                let logger = WithChannelMonitor::from(&args.logger, monitor);
 +                              let channel_id = monitor.channel_id();
                                log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
 -                                      &funding_txo.to_channel_id());
 +                                      &channel_id);
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        counterparty_node_id: None,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 +                                      channel_id: Some(monitor.channel_id()),
                                };
 -                              close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 +                              close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
                        }
                }
  
                const MAX_ALLOC_SIZE: usize = 1024 * 64;
                let forward_htlcs_count: u64 = Readable::read(reader)?;
 -              let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
 +              let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
                for _ in 0..forward_htlcs_count {
                        let short_channel_id = Readable::read(reader)?;
                        let pending_forwards_count: u64 = Readable::read(reader)?;
                let peer_state_from_chans = |channel_by_id| {
                        PeerState {
                                channel_by_id,
 -                              inbound_channel_request_by_id: HashMap::new(),
 +                              inbound_channel_request_by_id: new_hash_map(),
                                latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
                                in_flight_monitor_updates: BTreeMap::new(),
                };
  
                let peer_count: u64 = Readable::read(reader)?;
 -              let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
 +              let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
 -                      let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
 +                      let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
                        let mut peer_state = peer_state_from_chans(peer_chans);
                        peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
  
                let pending_inbound_payment_count: u64 = Readable::read(reader)?;
 -              let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
 +              let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
                for _ in 0..pending_inbound_payment_count {
                        if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
                                return Err(DecodeError::InvalidValue);
  
                let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
                let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
 -                      HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
 +                      hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
                for _ in 0..pending_outbound_payments_count_compat {
                        let session_priv = Readable::read(reader)?;
                        let payment = PendingOutboundPayment::Legacy {
 -                              session_privs: [session_priv].iter().cloned().collect()
 +                              session_privs: hash_set_from_iter([session_priv]),
                        };
                        if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
                                return Err(DecodeError::InvalidValue)
                // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
                let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
 -              let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
 +              let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                let mut claimable_htlc_onion_fields = None;
 -              let mut pending_claiming_payments = Some(HashMap::new());
 +              let mut pending_claiming_payments = Some(new_hash_map());
                let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
                let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
 +              let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
                        (10, in_flight_monitor_updates, option),
                        (11, probing_cookie_secret, option),
                        (13, claimable_htlc_onion_fields, optional_vec),
 +                      (14, decode_update_add_htlcs, option),
                });
 +              let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
                }
                if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
                } else if pending_outbound_payments.is_none() {
 -                      let mut outbounds = HashMap::new();
 +                      let mut outbounds = new_hash_map();
                        for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
                                outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
                        }
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
                                        log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
 -                                              update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
 +                                              update.update_id, $channel_info_log, &$monitor.channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id: $counterparty_node_id,
                                                        funding_txo: $funding_txo,
 +                                                      channel_id: $monitor.channel_id(),
                                                        update: update.clone(),
                                                });
                                }
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdatesComplete {
                                                        counterparty_node_id: $counterparty_node_id,
 -                                                      channel_id: $funding_txo.to_channel_id(),
 +                                                      channel_id: $monitor.channel_id(),
                                                });
                                }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
                                                }
                                        }
                                        if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
 -                                              // If the channel is ahead of the monitor, return InvalidValue:
 +                                              // If the channel is ahead of the monitor, return DangerousValue:
                                                log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                                log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_ids through {} in-flight",
                                                        chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
                                                log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                                log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                                log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
 -                                              return Err(DecodeError::InvalidValue);
 +                                              return Err(DecodeError::DangerousValue);
                                        }
                                } else {
                                        // We shouldn't have persisted (or read) any unfunded channel types so none should have been
  
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
 -                              let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
 +                              let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
 +                              let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
                                        // creating the necessary peer_state entries as we go.
                                        let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
 -                                              Mutex::new(peer_state_from_chans(HashMap::new()))
 +                                              Mutex::new(peer_state_from_chans(new_hash_map()))
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
                                                funding_txo, monitor, peer_state, logger, "closed ");
                                } else {
                                        log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
 -                                      log_error!(logger, " The ChannelMonitor for channel {} is missing.",
 -                                              &funding_txo.to_channel_id());
 +                                      log_error!(logger, " The ChannelMonitor for channel {} is missing.",
 +                                              if let Some(channel_id) = channel_id { channel_id.to_string() }
 +                                              else { format!("with outpoint {}", funding_txo) });
                                        log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                        log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
 +                                      log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
                                                                                retry_strategy: None,
                                                                                attempts: PaymentAttempts::new(),
                                                                                payment_params: None,
 -                                                                              session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
 +                                                                              session_privs: hash_set_from_iter([session_priv_bytes]),
                                                                                payment_hash: htlc.payment_hash,
                                                                                payment_secret: None, // only used for retries, and we'll never retry on startup
                                                                                payment_metadata: None, // only used for retries, and we'll never retry on startup
                                                                // still have an entry for this HTLC in `forward_htlcs` or
                                                                // `pending_intercepted_htlcs`, we were apparently not persisted after
                                                                // the monitor was when forwarding the payment.
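 +                                                              // Prune this HTLC from the not-yet-decoded queue as well so we
 +                                                              // don't attempt to forward it a second time.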
 +                                                              decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
 +                                                                      update_add_htlcs.retain(|update_add_htlc| {
 +                                                                              let matches = *scid == prev_hop_data.short_channel_id &&
 +                                                                                      update_add_htlc.htlc_id == prev_hop_data.htlc_id;
 +                                                                              if matches {
 +                                                                                      log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
 +                                                                                              &htlc.payment_hash, &monitor.channel_id());
 +                                                                              }
 +                                                                              !matches
 +                                                                      });
 +                                                                      !update_add_htlcs.is_empty()
 +                                                              });
                                                                forward_htlcs.retain(|_, forwards| {
                                                                        forwards.retain(|forward| {
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                                log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
 -                                                                                                      &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
 +                                                                                                      &htlc.payment_hash, &monitor.channel_id());
                                                                                                false
                                                                                        } else { true }
                                                                                } else { true }
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
 -                                                                                      &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
 +                                                                                      &htlc.payment_hash, &monitor.channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
                                                                                                intercepted_id != ev_id
                                                                        let compl_action =
                                                                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                                                                        channel_funding_outpoint: monitor.get_funding_txo().0,
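 +                                                                                      // Carry the channel_id explicitly; it is no longer assumed to
 +                                                                                      // be derivable from the funding outpoint.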
 +                                                                                      channel_id: monitor.channel_id(),
                                                                                        counterparty_node_id: path.hops[0].pubkey,
                                                                                };
                                                                        pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
                                                                        // channel_id -> peer map entry).
                                                                        counterparty_opt.is_none(),
                                                                        counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
 -                                                                      monitor.get_funding_txo().0))
 +                                                                      monitor.get_funding_txo().0, monitor.channel_id()))
                                                        } else { None }
                                                } else {
                                                        // If it was an outbound payment, we've handled it above - if a preimage
                        }
                }
  
 -              if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
 +              if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
                        // If we have pending HTLCs to forward, assume we either dropped a
                        // `PendingHTLCsForwardable` or the user received it but never processed it as they
                        // shut down before the timer hit. Either way, set the time_forwardable to a small
                let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
  
 -              let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
 +              let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
                if let Some(purposes) = claimable_htlc_purposes {
                        if purposes.len() != claimable_htlcs_list.len() {
                                return Err(DecodeError::InvalidValue);
                                        return Err(DecodeError::InvalidValue);
                                }
                                let purpose = match &htlcs[0].onion_payload {
 -                                      OnionPayload::Invoice { _legacy_hop_data } => {
 +                                      OnionPayload::Invoice { _legacy_hop_data, payment_context: _ } => {
                                                if let Some(hop_data) = _legacy_hop_data {
 -                                                      events::PaymentPurpose::InvoicePayment {
 +                                                      events::PaymentPurpose::Bolt11InvoicePayment {
                                                                payment_preimage: match pending_inbound_payments.get(&payment_hash) {
                                                                        Some(inbound_payment) => inbound_payment.payment_preimage,
                                                                        None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
                        }
                }
  
 -              let mut outbound_scid_aliases = HashSet::new();
 +              let mut outbound_scid_aliases = new_hash_set();
                for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                                                // this channel as well. On the flip side, there's no harm in restarting
                                                // without the new monitor persisted - we'll end up right back here on
                                                // restart.
 -                                              let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
 +                                              let previous_channel_id = claimable_htlc.prev_hop.channel_id;
                                                if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
 -                                                              Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
 +                                                              Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
 -                                                      if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
 +                                                      if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
 -                                                                      blocked_channel_outpoint.to_channel_id());
 +                                                                      blocked_channel_id);
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
 -                                                                      .entry(blocked_channel_outpoint.to_channel_id())
 +                                                                      .entry(*blocked_channel_id)
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
                                                        } else {
                                                                // If the channel we were blocking has closed, we don't need to
                        pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
  
                        forward_htlcs: Mutex::new(forward_htlcs),
 +                      decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        outpoint_to_peer: Mutex::new(outpoint_to_peer),
  
                        pending_offers_messages: Mutex::new(Vec::new()),
  
 +                      pending_broadcast_messages: Mutex::new(Vec::new()),
 +
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
                        signer_provider: args.signer_provider,
                        channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
  
 -              for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
 +              for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
                        // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
 -                      channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
 -                              downstream_closed, true, downstream_node_id, downstream_funding);
 +                      channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
 +                              downstream_closed, true, downstream_node_id, downstream_funding,
 +                              downstream_channel_id, None
 +                      );
                }
  
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@@ -12856,61 -11630,6 +12857,61 @@@ mod tests 
                }
        }
  
 +      #[test]
 +      fn test_channel_update_cached() {
 +              let chanmon_cfgs = create_chanmon_cfgs(3);
 +              let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 +              let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 +              let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 +
 +              let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 +
 +              nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
 +              check_added_monitors!(nodes[0], 1);
 +              check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 +
 +              // Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
 +              let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
 +              assert_eq!(node_1_events.len(), 0);
 +
 +              {
 +                      // Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
 +                      let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
 +                      assert_eq!(pending_broadcast_messages.len(), 1);
 +              }
 +
 +              // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
 +              nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 +              nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 +
 +              nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
 +              nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 +
 +              let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(node_0_events.len(), 0);
 +
 +              // Now we reconnect to a peer
 +              nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
 +                      features: nodes[2].node.init_features(), networks: None, remote_network_address: None
 +              }, true).unwrap();
 +              nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
 +                      features: nodes[0].node.init_features(), networks: None, remote_network_address: None
 +              }, false).unwrap();
 +
 +              // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
 +              let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
 +              assert_eq!(node_0_events.len(), 1);
 +              match &node_0_events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { .. } => (),
 +                      _ => panic!("Unexpected event"),
 +              }
 +              {
 +                      // Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
 +                      let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
 +                      assert_eq!(pending_broadcast_messages.len(), 0);
 +              }
 +      }
 +
        #[test]
        fn test_drop_disconnected_peers_when_removing_channels() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
  
 -              check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 -              check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 +              check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 +              check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
        }
  
        fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
                                check_added_monitors!(nodes[0], 1);
                                expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
                        }
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
 -              open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +              open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
 +                      &nodes[0].keys_manager);
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // Further, because all of our channels with nodes[0] are inbound, and none of them funded,
                // it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
                for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
                        nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // Of course, however, outbound channels are always allowed
                nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
                for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
                        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
                // rejected.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // but we can still open an outbound channel.
                nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
                // but even with such an outbound channel, additional inbound channels will still fail.
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
        }
  
        #[test]
                                _ => panic!("Unexpected event"),
                        }
                        get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
 -                      open_channel_msg.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
 +                      open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
                }
  
                // If we try to accept a channel from another peer non-0conf it will fail.
                        _ => panic!("Unexpected event"),
                }
                assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 -                      open_channel_msg.temporary_channel_id);
 +                      open_channel_msg.common_fields.temporary_channel_id);
  
                // ...however if we accept the same channel 0conf it should work just fine.
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                };
                // Check that if the amount we received + the penultimate hop extra fee is less than the sender
                // intended amount, we fail the payment.
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
                        create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                                sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
                        }),
                        custom_tlvs: Vec::new(),
                };
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                        sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
                        current_height, node[0].node.default_configuration.accept_mpp_keysend).is_ok());
                let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
                let node = create_network(1, &node_cfg, &node_chanmgr);
  
 -              let current_height: u32 = node[0].node.best_block.read().unwrap().height();
 +              let current_height: u32 = node[0].node.best_block.read().unwrap().height;
                let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
                        sender_intended_htlc_amt_msat: 100,
                        cltv_expiry_height: 22,
  
                nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -              assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
 +              assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
  
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
                let events = nodes[1].node.get_and_clear_pending_events();
                nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
  
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 -              assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
 +              assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
  
                // Since nodes[1] should not have accepted the channel, it should
                // not have generated any events.
  
  
                let (scid_1, scid_2) = (42, 43);
 -              let mut forward_htlcs = HashMap::new();
 +              let mut forward_htlcs = new_hash_map();
                forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
                forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
  
@@@ -13780,7 -12498,7 +13781,7 @@@ pub mod bench 
        use bitcoin::blockdata::locktime::absolute::LockTime;
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
 -      use bitcoin::{Block, Transaction, TxOut};
 +      use bitcoin::{Transaction, TxOut};
  
        use crate::sync::{Arc, Mutex, RwLock};
  
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
                let scorer = RwLock::new(test_utils::TestScorer::new());
 -              let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
 +              let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
  
                let mut config: UserConfig = Default::default();
                config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
  
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
  
 -              let block = create_dummy_block(BestBlock::from_network(network).block_hash(), 42, vec![tx]);
 +              let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
  
index 30615b86d2f6560057276370770446ce2c99881b,26bec030f13bcb126bb92f299ad4dfc5425dca27..d5f28c23b4e6271510f78a741e038deeb3e05c3a
@@@ -9,13 -9,13 +9,13 @@@
  
  //! Further functional tests which test blockchain reorganizations.
  
 -use crate::sign::{ecdsa::EcdsaChannelSigner, SpendableOutputDescriptor};
 +use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor};
  use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance};
  use crate::chain::transaction::OutPoint;
  use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
  use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
  use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 -use crate::ln::channel;
 +use crate::ln::{channel, ChannelId};
  use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
  use crate::ln::msgs::ChannelMessageHandler;
  use crate::util::config::UserConfig;
@@@ -158,60 -158,6 +158,60 @@@ fn revoked_output_htlc_resolution_timin
        expect_payment_failed!(nodes[1], payment_hash_1, false);
  }
  
 +#[test]
 +fn archive_fully_resolved_monitors() {
 +      // Test that we can archive a fully resolved channel monitor.
 +      let chanmon_cfgs = create_chanmon_cfgs(2);
 +      let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 +      let mut user_config = test_default_channel_config();
 +      let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]);
 +      let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +
 +      let (_, _, chan_id, funding_tx) =
 +              create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
 +
 +      nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
 +      let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
 +      let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
 +
 +      let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
 +      let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
 +      nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
 +      let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 +      nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
 +      let (_, _) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 +
 +      let shutdown_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 +
 +      mine_transaction(&nodes[0], &shutdown_tx[0]);
 +      mine_transaction(&nodes[1], &shutdown_tx[0]);
 +
 +      connect_blocks(&nodes[0], 6);
 +      connect_blocks(&nodes[1], 6);
 +
 +      check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 +
 +      assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 +      // The first archive call should only set balances_empty_height to the current block height
 +      nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 +      assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 +      connect_blocks(&nodes[0], 4032);
 +      // The second call, 4032 blocks (roughly four weeks) later, should archive the monitor
 +      nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 +      // Should have no monitors left
 +      assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 0);
 +      // Remove the corresponding outputs and transactions that the chain source is still
 +      // watching, so that the assertions in the test harness's `Drop` implementation pass.
 +      nodes.get_mut(0).unwrap().chain_source.remove_watched_txn_and_outputs(
 +              OutPoint { txid: funding_tx.txid(), index: 0 },
 +              funding_tx.output[0].script_pubkey.clone()
 +      );
 +}
 +
  fn do_chanmon_claim_value_coop_close(anchors: bool) {
        // Tests `get_claimable_balances` returns the correct values across a simple cooperative claim.
        // Specifically, this tests that the channel non-HTLC balances show up in
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 -      assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 +      assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
  
        let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
        let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
        assert_eq!(shutdown_tx, nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0));
        assert_eq!(shutdown_tx.len(), 1);
  
 -      let shutdown_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &shutdown_tx[0]));
 -      let shutdown_tx_conf_height_b = block_from_scid(&mine_transaction(&nodes[1], &shutdown_tx[0]));
 +      let shutdown_tx_conf_height_a = block_from_scid(mine_transaction(&nodes[0], &shutdown_tx[0]));
 +      let shutdown_tx_conf_height_b = block_from_scid(mine_transaction(&nodes[1], &shutdown_tx[0]));
  
        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
                spendable_outputs_b
        );
  
 -      check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
  }
  
  #[test]
@@@ -381,7 -327,7 +381,7 @@@ fn do_test_claim_value_force_close(anch
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 -      assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 +      assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
  
        // This HTLC is immediately claimed, giving node B the preimage
        let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
@@@ -790,7 -736,7 +790,7 @@@ fn do_test_balances_on_local_commitment
                check_spends!(commitment_tx, funding_tx);
                commitment_tx
        };
 -      let commitment_tx_conf_height_a = block_from_scid(&mine_transaction(&nodes[0], &commitment_tx));
 +      let commitment_tx_conf_height_a = block_from_scid(mine_transaction(&nodes[0], &commitment_tx));
        if nodes[0].connect_style.borrow().updates_best_block_first() {
                let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
                assert_eq!(txn.len(), 1);
@@@ -1175,7 -1121,7 +1175,7 @@@ fn do_test_revoked_counterparty_commitm
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 -      assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 +      assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
  
        // We create five HTLCs for B to claim against A's revoked commitment transaction:
        //
        assert!(failed_payments.is_empty());
        if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
        match &events[1] {
 -              Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
 +              Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
                _ => panic!(),
        }
  
        connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 -      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
  
        // Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
        // lists the two on-chain timeout-able HTLCs as claimable balances.
@@@ -1457,7 -1403,7 +1457,7 @@@ fn do_test_revoked_counterparty_htlc_tx
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 -      assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 +      assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
  
        let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
        let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
@@@ -1759,7 -1705,7 +1759,7 @@@ fn do_test_revoked_counterparty_aggrega
        let (_, _, chan_id, funding_tx) =
                create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
        let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
 -      assert_eq!(funding_outpoint.to_channel_id(), chan_id);
 +      assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);
  
        // We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
        // transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
@@@ -2626,7 -2572,7 +2626,7 @@@ fn test_anchors_aggregated_revoked_htlc
                        check_spends!(revoked_htlc_claim, htlc_tx);
                }
  
 -              let mut revoked_claim_transaction_map = HashMap::new();
 +              let mut revoked_claim_transaction_map = new_hash_map();
                for current_tx in txn.into_iter() {
                        revoked_claim_transaction_map.insert(current_tx.txid(), current_tx);
                }
@@@ -2728,14 -2674,14 +2728,14 @@@ fn do_test_anchors_monitor_fixes_counte
                // We should expect our round trip serialization check to fail as we're writing the monitor
                // with the incorrect P2WPKH script but reading it with the correct P2WSH script.
                *nodes[1].chain_monitor.expect_monitor_round_trip_fail.lock().unwrap() = Some(chan_id);
 -              let commitment_tx_conf_height = block_from_scid(&mine_transaction(&nodes[1], &commitment_tx));
 +              let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
                let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
                reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
                commitment_tx_conf_height
        } else {
                let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();
                reload_node!(nodes[1], user_config, &nodes[1].node.encode(), &[&serialized_monitor], persister, chain_monitor, node_deserialized);
 -              let commitment_tx_conf_height = block_from_scid(&mine_transaction(&nodes[1], &commitment_tx));
 +              let commitment_tx_conf_height = block_from_scid(mine_transaction(&nodes[1], &commitment_tx));
                check_added_monitors(&nodes[1], 1);
                check_closed_broadcast(&nodes[1], 1, true);
                commitment_tx_conf_height
@@@ -2809,9 -2755,7 +2809,9 @@@ fn do_test_monitor_claims_with_random_s
                (&nodes[0], &nodes[1])
        };
  
 -      closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id()).unwrap();
 +      get_monitor!(closing_node, chan_id).broadcast_latest_holder_commitment_txn(
 +              &closing_node.tx_broadcaster, &closing_node.fee_estimator, &closing_node.logger
 +      );
  
        // The commitment transaction comes first.
        let commitment_tx = {
        mine_transaction(closing_node, &commitment_tx);
        check_added_monitors!(closing_node, 1);
        check_closed_broadcast!(closing_node, true);
 -      check_closed_event!(closing_node, 1, ClosureReason::HolderForceClosed, [other_node.node.get_our_node_id()], 1_000_000);
 +      check_closed_event!(closing_node, 1, ClosureReason::CommitmentTxConfirmed, [other_node.node.get_our_node_id()], 1_000_000);
  
        mine_transaction(other_node, &commitment_tx);
        check_added_monitors!(other_node, 1);
@@@ -2877,3 -2821,40 +2877,40 @@@ fn test_monitor_claims_with_random_sign
        do_test_monitor_claims_with_random_signatures(true, false);
        do_test_monitor_claims_with_random_signatures(true, true);
  }
+ 
+ #[test]
+ fn test_event_replay_causing_monitor_replay() {
+       // In LDK 0.0.121 there was a bug where a `PaymentSent` event could cause an RAA
+       // `ChannelMonitorUpdate` to be held. If the node then restarted after both the `PaymentSent`
+       // event and the `ChannelMonitorUpdate` had completed, but before the `ChannelManager` was
+       // persisted, we'd replay the `ChannelMonitorUpdate` on restart (which is fine, but triggered
+       // a safety panic).
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let persister;
+       let new_chain_monitor;
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let node_deserialized;
+       let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+       let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 1_000_000).0;
+       do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
+       // At this point the `PaymentSent` event has not been processed but the full commitment signed
+       // dance has completed.
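+       // Snapshot the `ChannelManager` before handling the event. The `ChannelMonitor` is
+       // serialized after the event below, so on reload the monitor (but not the manager) will
+       // already reflect the final RAA update.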
+       let serialized_channel_manager = nodes[0].node.encode();
+       // Now process the `PaymentSent` to get the final RAA `ChannelMonitorUpdate`, checking that it
+       // resulted in a `ChannelManager` persistence request.
+       nodes[0].node.get_and_clear_needs_persistence();
+       expect_payment_sent(&nodes[0], payment_preimage, None, true, true /* expected post-event monitor update */);
+       assert!(nodes[0].node.get_and_clear_needs_persistence());
+       let serialized_monitor = get_monitor!(nodes[0], chan.2).encode();
+       reload_node!(nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, new_chain_monitor, node_deserialized);
+       // Expect the `PaymentSent` to get replayed, this time without the duplicate monitor update
+       expect_payment_sent(&nodes[0], payment_preimage, None, false, false /* expected post-event monitor update */);
+ }