Note when new HTLC state can be `None`
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index d1c91ce1495fbbc636651b75cdc0fab2e62e11f3..31989a011a971b5dc55f170eac5e3de9e99205b2 100644
@@ -158,6 +158,72 @@ enum InboundHTLCState {
        LocalRemoved(InboundHTLCRemovalReason),
 }
 
+/// Exposes the state of pending inbound HTLCs.
+///
+/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
+/// through the following states in the state machine:
+/// - Announced for addition by the originating node through the update_add_htlc message.
+/// - Added to the commitment transaction of the receiving node and originating node in turn
+///   through the exchange of commitment_signed and revoke_and_ack messages.
+/// - Announced for resolution (fulfillment or failure) by the receiving node through one of
+///   the update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc messages.
+/// - Removed from the commitment transaction of the originating node and receiving node in turn
+///   through the exchange of commitment_signed and revoke_and_ack messages.
+///
+/// This can be used to inspect which message an HTLC is waiting for to advance its state.
+#[derive(Clone, Debug, PartialEq)]
+pub enum InboundHTLCStateDetails {
+       /// We have added this HTLC to our commitment transaction by receiving commitment_signed and
+       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+       /// before this HTLC is included on the remote commitment transaction.
+       AwaitingRemoteRevokeToAdd,
+       /// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
+       /// and is included in both commitment transactions.
+       ///
+       /// This HTLC can now safely either be forwarded or claimed as a payment by us. The HTLC will
+       /// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
+       /// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
+       /// payment, it will only be claimed together with other required parts.
+       Committed,
+       /// We have received the preimage for this HTLC and are removing it by fulfilling it with
+       /// update_fulfill_htlc. The HTLC is still on both commitment transactions, and we are awaiting
+       /// the appropriate revoke_and_ack's from the remote before it is removed from the remote
+       /// commitment transaction.
+       AwaitingRemoteRevokeToRemoveFulfill,
+       /// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
+       /// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
+       /// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
+       /// transaction.
+       AwaitingRemoteRevokeToRemoveFail,
+}
+
+impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
+       fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
+               match state {
+                       InboundHTLCState::RemoteAnnounced(_) => None,
+                       InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
+                               Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+                       InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
+                               Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+                       InboundHTLCState::Committed =>
+                               Some(InboundHTLCStateDetails::Committed),
+                       InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
+                               Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
+                       InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
+                               Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
+                       InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
+                               Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
+               }
+       }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
+       (0, AwaitingRemoteRevokeToAdd) => {},
+       (2, Committed) => {},
+       (4, AwaitingRemoteRevokeToRemoveFulfill) => {},
+       (6, AwaitingRemoteRevokeToRemoveFail) => {};
+);
+
 struct InboundHTLCOutput {
        htlc_id: u64,
        amount_msat: u64,
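
The enum above is a read-only projection of the internal `InboundHTLCState` machine. A minimal, self-contained sketch of how a consumer might map each reported state to the message that advances it (the enum is re-declared locally rather than pulled from the `lightning` crate, and `next_message` is an illustrative helper, not crate API):

```rust
#[derive(Clone, Debug, PartialEq)]
enum InboundHTLCStateDetails {
    AwaitingRemoteRevokeToAdd,
    Committed,
    AwaitingRemoteRevokeToRemoveFulfill,
    AwaitingRemoteRevokeToRemoveFail,
}

fn next_message(state: &InboundHTLCStateDetails) -> &'static str {
    match state {
        // The add is not yet irrevocably committed on the remote side.
        InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd => "revoke_and_ack (for the add)",
        // On both commitment transactions; nothing to wait for until we
        // forward, claim, or fail the HTLC ourselves.
        InboundHTLCStateDetails::Committed => "no message; awaiting local resolution",
        // A removal was announced; the remote must revoke the old state.
        InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill
        | InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail => "revoke_and_ack (for the removal)",
    }
}

fn main() {
    assert_eq!(next_message(&InboundHTLCStateDetails::Committed),
        "no message; awaiting local resolution");
}
```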
@@ -166,6 +232,53 @@ struct InboundHTLCOutput {
        state: InboundHTLCState,
 }
 
+/// Exposes details around pending inbound HTLCs.
+#[derive(Clone, Debug, PartialEq)]
+pub struct InboundHTLCDetails {
+       /// The HTLC ID.
+       /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
+       /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
+       /// and not part of any commitment transaction.
+       pub htlc_id: u64,
+       /// The amount in msat.
+       pub amount_msat: u64,
+       /// The block height at which this HTLC expires.
+       pub cltv_expiry: u32,
+       /// The payment hash.
+       pub payment_hash: PaymentHash,
+       /// The state of the HTLC in the state machine.
+       ///
+       /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
+       /// waiting for to advance to the next state.
+       ///
+       /// See [`InboundHTLCStateDetails`] for information on the specific states.
+       ///
+       /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
+       /// states may result in `None` here.
+       pub state: Option<InboundHTLCStateDetails>,
+       /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
+       /// from the local commitment transaction and added to the commitment transaction fee.
+       /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
+       /// transactions as well.
+       ///
+       /// When the local commitment transaction is broadcast as part of a unilateral closure,
+       /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
+       /// fee.
+       ///
+       /// Note that dust limits are specific to each party. An HTLC can be dust for the local
+       /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
+       pub is_dust: bool,
+}
+
+impl_writeable_tlv_based!(InboundHTLCDetails, {
+       (0, htlc_id, required),
+       (2, amount_msat, required),
+       (4, cltv_expiry, required),
+       (6, payment_hash, required),
+       (7, state, upgradable_option),
+       (8, is_dust, required),
+});
+
 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum OutboundHTLCState {
        /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
@@ -199,6 +312,72 @@ enum OutboundHTLCState {
        AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
 }
 
+/// Exposes the state of pending outbound HTLCs.
+///
+/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
+/// through the following states in the state machine:
+/// - Announced for addition by the originating node through the update_add_htlc message.
+/// - Added to the commitment transaction of the receiving node and originating node in turn
+///   through the exchange of commitment_signed and revoke_and_ack messages.
+/// - Announced for resolution (fulfillment or failure) by the receiving node through one of
+///   the update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc messages.
+/// - Removed from the commitment transaction of the originating node and receiving node in turn
+///   through the exchange of commitment_signed and revoke_and_ack messages.
+///
+/// This can be used to inspect which message an HTLC is waiting for to advance its state.
+#[derive(Clone, Debug, PartialEq)]
+pub enum OutboundHTLCStateDetails {
+       /// We have sent update_add_htlc and are awaiting the appropriate revoke_and_ack's from the
+       /// remote before the HTLC is added to the remote's commitment transaction.
+       AwaitingRemoteRevokeToAdd,
+       /// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
+       /// and receiving revoke_and_ack in return.
+       ///
+       /// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
+       /// unilaterally close the channel due to a timeout with an uncooperative remote node.
+       Committed,
+       /// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
+       /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
+       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+       /// for the removal from its commitment transaction.
+       AwaitingRemoteRevokeToRemoveSuccess,
+       /// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
+       /// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
+       /// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
+       /// for the removal from its commitment transaction.
+       AwaitingRemoteRevokeToRemoveFailure,
+}
+
+impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
+       fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
+               match state {
+                       OutboundHTLCState::LocalAnnounced(_) =>
+                               OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
+                       OutboundHTLCState::Committed =>
+                               OutboundHTLCStateDetails::Committed,
+                       // RemoteRemoved states are reported as Committed because the removal is still
+                       // transient and the remote has not yet committed to it.
+                       OutboundHTLCState::RemoteRemoved(_) =>
+                               OutboundHTLCStateDetails::Committed,
+                       OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
+                               OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
+                       OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
+                               OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
+                       OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
+                               OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
+                       OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
+                               OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
+               }
+       }
+}
+
+impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
+       (0, AwaitingRemoteRevokeToAdd) => {},
+       (2, Committed) => {},
+       (4, AwaitingRemoteRevokeToRemoveSuccess) => {},
+       (6, AwaitingRemoteRevokeToRemoveFailure) => {};
+);
+
 #[derive(Clone)]
 #[cfg_attr(test, derive(Debug, PartialEq))]
 enum OutboundHTLCOutcome {
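
As with the inbound variant, this is a coarse projection: note in the `From` impl above that the transient `RemoteRemoved` state is still reported as `Committed`. A standalone sketch (mirrored enum, illustrative helper) of one use a consumer might make of the reported states, totaling the value the remote has fulfilled but whose removal is not yet irrevocably committed:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum OutboundHTLCStateDetails {
    AwaitingRemoteRevokeToAdd,
    Committed,
    AwaitingRemoteRevokeToRemoveSuccess,
    AwaitingRemoteRevokeToRemoveFailure,
}

// Sum the amounts of outbound HTLCs with a preimage revealed but a removal
// still pending the remote's revoke_and_ack.
fn pending_fulfilled_msat(htlcs: &[(u64, OutboundHTLCStateDetails)]) -> u64 {
    htlcs.iter()
        .filter(|&&(_, state)| state == OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess)
        .map(|&(amount_msat, _)| amount_msat)
        .sum()
}

fn main() {
    let htlcs = [
        (1_000, OutboundHTLCStateDetails::Committed),
        (2_000, OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess),
    ];
    assert_eq!(pending_fulfilled_msat(&htlcs), 2_000);
}
```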
@@ -237,6 +416,58 @@ struct OutboundHTLCOutput {
        skimmed_fee_msat: Option<u64>,
 }
 
+/// Exposes details around pending outbound HTLCs.
+#[derive(Clone, Debug, PartialEq)]
+pub struct OutboundHTLCDetails {
+       /// The HTLC ID.
+       /// The IDs are incremented by 1 starting from 0 for each offered HTLC.
+       /// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
+       /// and not part of any commitment transaction.
+       ///
+       /// Not present when we are awaiting a remote revocation and the HTLC has not yet been added.
+       pub htlc_id: Option<u64>,
+       /// The amount in msat.
+       pub amount_msat: u64,
+       /// The block height at which this HTLC expires.
+       pub cltv_expiry: u32,
+       /// The payment hash.
+       pub payment_hash: PaymentHash,
+       /// The state of the HTLC in the state machine.
+       ///
+       /// Determines on which commitment transactions the HTLC is included and what message the HTLC is
+       /// waiting for to advance to the next state.
+       ///
+       /// See [`OutboundHTLCStateDetails`] for information on the specific states.
+       ///
+       /// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
+       /// states may result in `None` here.
+       pub state: Option<OutboundHTLCStateDetails>,
+       /// The extra fee being skimmed off the top of this HTLC.
+       pub skimmed_fee_msat: Option<u64>,
+       /// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
+       /// from the local commitment transaction and added to the commitment transaction fee.
+       /// For non-anchor channels, this takes into account the cost of the second-stage HTLC
+       /// transactions as well.
+       ///
+       /// When the local commitment transaction is broadcast as part of a unilateral closure,
+       /// the value of this HTLC will therefore not be claimable but instead burned as a transaction
+       /// fee.
+       ///
+       /// Note that dust limits are specific to each party. An HTLC can be dust for the local
+       /// commitment transaction but not for the counterparty's commitment transaction and vice versa.
+       pub is_dust: bool,
+}
+
+impl_writeable_tlv_based!(OutboundHTLCDetails, {
+       (0, htlc_id, required),
+       (2, amount_msat, required),
+       (4, cltv_expiry, required),
+       (6, payment_hash, required),
+       (7, state, upgradable_option),
+       (8, skimmed_fee_msat, required),
+       (10, is_dust, required),
+});
+
 /// See AwaitingRemoteRevoke ChannelState for more info
 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
 enum HTLCUpdateAwaitingACK {
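
The `is_dust` fields above report a per-party threshold. A self-contained sketch of the test, mirroring the computation in `get_pending_outbound_htlc_details`/`get_pending_inbound_htlc_details` further down (parameter values in `main` are illustrative; the real weights come from `htlc_timeout_tx_weight`/`htlc_success_tx_weight`):

```rust
fn is_dust(
    amount_msat: u64,
    holder_dust_limit_satoshis: u64,
    dust_buffer_feerate: u64, // sat per 1000 weight units
    htlc_tx_weight: u64,      // timeout tx for outbound, success tx for inbound
    anchors_zero_fee_htlc_tx: bool,
) -> bool {
    // Anchor (zero-fee HTLC tx) channels pay no second-stage fee, so only
    // the plain dust limit applies.
    let second_stage_fee_sat = if anchors_zero_fee_htlc_tx {
        0
    } else {
        dust_buffer_feerate * htlc_tx_weight / 1000
    };
    amount_msat / 1000 < holder_dust_limit_satoshis + second_stage_fee_sat
}

fn main() {
    // A 546 sat HTLC against a 354 sat dust limit on an anchor channel: not dust.
    assert!(!is_dust(546_000, 354, 253, 663, true));
}
```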
@@ -267,7 +498,7 @@ enum HTLCUpdateAwaitingACK {
 }
 
 macro_rules! define_state_flags {
-       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
+       ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
                #[doc = $flag_type_doc]
                #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
                struct $flag_type(u32);
@@ -296,15 +527,18 @@ macro_rules! define_state_flags {
 
                        #[allow(unused)]
                        fn is_empty(&self) -> bool { self.0 == 0 }
-
                        #[allow(unused)]
                        fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
+                       #[allow(unused)]
+                       fn set(&mut self, flag: Self) { *self |= flag }
+                       #[allow(unused)]
+                       fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
                }
 
-               impl core::ops::Not for $flag_type {
-                       type Output = Self;
-                       fn not(self) -> Self::Output { Self(!self.0) }
-               }
+               $(
+                       define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
+               )*
+
                impl core::ops::BitOr for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
@@ -323,8 +557,28 @@ macro_rules! define_state_flags {
        ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
        };
+       ($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
+               impl $flag_type {
+                       #[allow(unused)]
+                       fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
+                       #[allow(unused)]
+                       fn $set(&mut self) { self.set($flag_type::new() | $flag) }
+                       #[allow(unused)]
+                       fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
+               }
+       };
        ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
                define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
+
+               define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
+                       is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
+               define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
+                       is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
+               define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
+                       is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
+               define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
+                       is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);
+
                impl core::ops::BitOr<FundedStateFlags> for $flag_type {
                        type Output = Self;
                        fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
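
The extra `$get`/`$set`/`$clear` idents fed to `define_state_flags!` make the macro generate per-flag accessors on the flag type itself, replacing ad-hoc `is_set`/`|=`/`&= !` call sites. A hand-rolled miniature of what one expansion produces for a single flag (names illustrative):

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
struct DemoFlags(u32);

impl DemoFlags {
    const PEER_DISCONNECTED: DemoFlags = DemoFlags(1 << 0);
    fn new() -> Self { Self(0) }
    fn is_set(&self, flag: Self) -> bool { self.0 & flag.0 == flag.0 }
    // Generated accessors: the getter returns bool, the setter mutates, and
    // clear mutates then returns the new value so call sites can compare.
    fn is_peer_disconnected(&self) -> bool { self.is_set(Self::PEER_DISCONNECTED) }
    fn set_peer_disconnected(&mut self) { self.0 |= Self::PEER_DISCONNECTED.0; }
    fn clear_peer_disconnected(&mut self) -> Self { self.0 &= !Self::PEER_DISCONNECTED.0; *self }
}

fn main() {
    let mut f = DemoFlags::new();
    f.set_peer_disconnected();
    assert!(f.is_peer_disconnected());
    assert_eq!(f.clear_peer_disconnected(), DemoFlags::new());
}
```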
@@ -371,15 +625,19 @@ define_state_flags!(
        "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
        FundedStateFlags, [
                ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
-                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
+                       until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
+                       is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
                ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
                        somewhere and we should pause sending any outbound messages until they've managed to \
-                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
+                       complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
+                       is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
                ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
                        any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
-                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
+                       message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
+                       is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
                ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
-                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
+                       the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
+                       is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
        ]
 );
 
@@ -387,9 +645,9 @@ define_state_flags!(
        "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
        NegotiatingFundingFlags, [
                ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
-                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
+                       OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
                ("Indicates we have received their `open_channel`/`accept_channel` message.",
-                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
+                       THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
        ]
 );
 
@@ -398,13 +656,16 @@ define_state_flags!(
        FUNDED_STATE, AwaitingChannelReadyFlags, [
                ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
-                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
+                       THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
+                       is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
                ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
                        `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
-                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
+                       OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
+                       is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
                ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
                        is being held until all channels in the batch have received `funding_signed` and have \
-                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
+                       their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
+                       is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
        ]
 );
 
@@ -415,10 +676,13 @@ define_state_flags!(
                        `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
                        messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
                        implicit ACK, so instead we have to hold them away temporarily to be sent later.",
-                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
+                       AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
+                       is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
        ]
 );
 
+// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
+// into account when introducing new states and update `test_channel_state_order` accordingly.
 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
 enum ChannelState {
        /// We are negotiating the parameters required for the channel prior to funding it.
@@ -439,12 +703,12 @@ enum ChannelState {
 }
 
 macro_rules! impl_state_flag {
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
+       ($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
                #[allow(unused)]
                fn $get(&self) -> bool {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => flags.is_set($state_flag.into()),
+                                       ChannelState::$state(flags) => flags.$get(),
                                )*
                                _ => false,
                        }
@@ -453,7 +717,7 @@ macro_rules! impl_state_flag {
                fn $set(&mut self) {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => *flags |= $state_flag,
+                                       ChannelState::$state(flags) => flags.$set(),
                                )*
                                _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
                        }
@@ -462,17 +726,17 @@ macro_rules! impl_state_flag {
                fn $clear(&mut self) {
                        match self {
                                $(
-                                       ChannelState::$state(flags) => *flags &= !($state_flag),
+                                       ChannelState::$state(flags) => { let _ = flags.$clear(); },
                                )*
                                _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
                        }
                }
        };
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
-               impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
+       ($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
+               impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
        };
-       ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
-               impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
+       ($get: ident, $set: ident, $clear: ident, $state: ident) => {
+               impl_state_flag!($get, $set, $clear, [$state]);
        };
 }
 
@@ -523,35 +787,27 @@ impl ChannelState {
                }
        }
 
-       fn should_force_holding_cell(&self) -> bool {
+       fn can_generate_new_commitment(&self) -> bool {
                match self {
                        ChannelState::ChannelReady(flags) =>
-                               flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
-                                       flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
-                                       flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
+                               !flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
+                                       !flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
+                                       !flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
                        _ => {
-                               debug_assert!(false, "The holding cell is only valid within ChannelReady");
+                               debug_assert!(false, "Can only generate new commitment within ChannelReady");
                                false
                        },
                }
        }
 
-       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
-               FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
-       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
-               FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
-       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
-               FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
-       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
-               FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
-       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
-               AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
-       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
-               AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
-       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
-               AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
-       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
-               ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
+       impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
+       impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
+       impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
+       impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
+       impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
+       impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
+       impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
+       impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
 }
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
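
`can_generate_new_commitment` is the positive form of the old `should_force_holding_cell`, and every call site below flips its polarity accordingly. A truth-table sketch with plain booleans standing in for the `ChannelReady` flags:

```rust
// A new commitment_signed may only be generated when none of the three
// blocking conditions hold; otherwise updates go into the holding cell.
fn can_generate_new_commitment(
    awaiting_remote_revoke: bool,
    monitor_update_in_progress: bool,
    peer_disconnected: bool,
) -> bool {
    !awaiting_remote_revoke && !monitor_update_in_progress && !peer_disconnected
}

fn main() {
    assert!(can_generate_new_commitment(false, false, false));
    // Any one blocking flag forces updates into the holding cell instead.
    assert!(!can_generate_new_commitment(true, false, false));
}
```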
@@ -1231,6 +1487,9 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
        // We track whether we already emitted a `ChannelReady` event.
        channel_ready_event_emitted: bool,
 
+       /// `Some(())` if we initiated the channel shutdown.
+       local_initiated_shutdown: Option<()>,
+
        /// The unique identifier used to re-derive the private key material for the channel through
        /// [`SignerProvider::derive_channel_signer`].
        channel_keys_id: [u8; 32],
@@ -1966,6 +2225,99 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                stats
        }
 
+       /// Returns information on all pending inbound HTLCs.
+       pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
+               let mut holding_cell_states = new_hash_map();
+               for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+                       match holding_cell_update {
+                               HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+                                       holding_cell_states.insert(
+                                               htlc_id,
+                                               InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
+                                       );
+                               },
+                               HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+                                       holding_cell_states.insert(
+                                               htlc_id,
+                                               InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+                                       );
+                               },
+                               HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
+                                       holding_cell_states.insert(
+                                               htlc_id,
+                                               InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
+                                       );
+                               },
+                               // Outbound HTLC.
+                               HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
+                       }
+               }
+               let mut inbound_details = Vec::new();
+               let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       0
+               } else {
+                       let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+                       dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
+               };
+               let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
+               for htlc in self.pending_inbound_htlcs.iter() {
+                       if let Some(state_details) = (&htlc.state).into() {
+                               inbound_details.push(InboundHTLCDetails{
+                                       htlc_id: htlc.htlc_id,
+                                       amount_msat: htlc.amount_msat,
+                                       cltv_expiry: htlc.cltv_expiry,
+                                       payment_hash: htlc.payment_hash,
+                                       state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
+                                       is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
+                               });
+                       }
+               }
+               inbound_details
+       }
+
+       /// Returns information on all pending outbound HTLCs.
+       pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
+               let mut outbound_details = Vec::new();
+               let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                       0
+               } else {
+                       let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+                       dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
+               };
+               let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
+               for htlc in self.pending_outbound_htlcs.iter() {
+                       outbound_details.push(OutboundHTLCDetails{
+                               htlc_id: Some(htlc.htlc_id),
+                               amount_msat: htlc.amount_msat,
+                               cltv_expiry: htlc.cltv_expiry,
+                               payment_hash: htlc.payment_hash,
+                               skimmed_fee_msat: htlc.skimmed_fee_msat,
+                               state: Some((&htlc.state).into()),
+                               is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
+                       });
+               }
+               for holding_cell_update in self.holding_cell_htlc_updates.iter() {
+                       if let HTLCUpdateAwaitingACK::AddHTLC {
+                               amount_msat,
+                               cltv_expiry,
+                               payment_hash,
+                               skimmed_fee_msat,
+                               ..
+                       } = *holding_cell_update {
+                               outbound_details.push(OutboundHTLCDetails{
+                                       htlc_id: None,
+                                       amount_msat,
+                                       cltv_expiry,
+                                       payment_hash,
+                                       skimmed_fee_msat,
+                                       state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
+                                       is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
+                               });
+                       }
+               }
+               outbound_details
+       }
+
        /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
        /// Doesn't bother handling the
        /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
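
Two details worth noting in the accessors above: the holding cell is consulted first, because a fulfill or fail still queued there has not yet advanced the HTLC's `InboundHTLCState`; and holding-cell `AddHTLC`s are reported as outbound HTLCs with `htlc_id: None`. A simplified, self-contained sketch of the override logic (stand-in types, not the crate's API):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum State { Committed, AwaitingRemoteRevokeToRemoveFulfill }

// A resolution queued in the holding cell takes precedence over the state
// machine's reading when reporting the HTLC's state.
fn reported_state(htlc_id: u64, machine_state: State, holding_cell: &HashMap<u64, State>) -> State {
    holding_cell.get(&htlc_id).copied().unwrap_or(machine_state)
}

fn main() {
    let mut cell = HashMap::new();
    cell.insert(7, State::AwaitingRemoteRevokeToRemoveFulfill);
    // HTLC 7 has a queued fulfill: reported as pending removal.
    assert_eq!(reported_state(7, State::Committed, &cell),
        State::AwaitingRemoteRevokeToRemoveFulfill);
    // HTLC 8 has nothing queued: reported from the state machine.
    assert_eq!(reported_state(8, State::Committed, &cell), State::Committed);
}
```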
@@ -2388,11 +2740,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                        // funding transaction, don't return a funding txo (which prevents providing the
                        // monitor update to the user, even if we return one).
                        // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
-                       let generate_monitor_update = match self.channel_state {
-                               ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
-                               _ => false,
-                       };
-                       if generate_monitor_update {
+                       if !self.channel_state.is_pre_funded_state() {
                                self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
                                Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
                                        update_id: self.latest_monitor_update_id,
@@ -2710,7 +3058,7 @@ impl<SP: Deref> Channel<SP> where
        where L::Target: Logger {
                // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
                // (see equivalent if condition there).
-               assert!(self.context.channel_state.should_force_holding_cell());
+               assert!(!self.context.channel_state.can_generate_new_commitment());
                let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
                let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
                self.context.latest_monitor_update_id = mon_update_id;
@@ -2781,7 +3129,7 @@ impl<SP: Deref> Channel<SP> where
                        channel_id: Some(self.context.channel_id()),
                };
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        // Note that this condition is the same as the assertion in
                        // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
                        // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
@@ -2955,7 +3303,7 @@ impl<SP: Deref> Channel<SP> where
                        return Ok(None);
                }
 
-               if self.context.channel_state.should_force_holding_cell() {
+               if !self.context.channel_state.can_generate_new_commitment() {
                        debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
                        force_holding_cell = true;
                }
@@ -3051,12 +3399,12 @@ impl<SP: Deref> Channel<SP> where
                let mut check_reconnection = false;
                match &self.context.channel_state {
                        ChannelState::AwaitingChannelReady(flags) => {
-                               let flags = *flags & !FundedStateFlags::ALL;
+                               let flags = flags.clone().clear(FundedStateFlags::ALL.into());
                                debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
-                               if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
+                               if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
                                        // If we reconnected before sending our `channel_ready` they may still resend theirs.
                                        check_reconnection = true;
-                               } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
+                               } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
                                        self.context.channel_state.set_their_channel_ready();
                                } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
                                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
@@ -3306,7 +3654,7 @@ impl<SP: Deref> Channel<SP> where
                Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
 
-       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+       pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
                if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
                        return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
@@ -3314,7 +3662,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
-               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+               self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
        }
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
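
`update_fulfill_htlc` now also surfaces the fulfilled HTLC's skimmed fee to the caller. A sketch of consuming the widened tuple (stand-in types; the real first element is an `HTLCSource` and the error a `ChannelError`):

```rust
// Stand-in for the crate's HTLCSource.
type HtlcSource = ();

fn on_fulfill(res: Result<(HtlcSource, u64, Option<u64>), String>) {
    if let Ok((_source, amount_msat, skimmed_fee_msat)) = res {
        println!("fulfilled {} msat ({} msat skimmed)",
            amount_msat, skimmed_fee_msat.unwrap_or(0));
    }
}

fn main() {
    on_fulfill(Ok(((), 5_000, Some(100))));
}
```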
@@ -3573,7 +3921,7 @@ impl<SP: Deref> Channel<SP> where
        ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
        where F::Target: FeeEstimator, L::Target: Logger
        {
-               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
+               if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
                        self.free_holding_cell_htlcs(fee_estimator, logger)
                } else { (None, Vec::new()) }
        }
@@ -4185,8 +4533,8 @@ impl<SP: Deref> Channel<SP> where
                // first received the funding_signed.
                let mut funding_broadcastable =
                        if self.context.is_outbound() &&
-                               matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
-                               matches!(self.context.channel_state, ChannelState::ChannelReady(_))
+                               (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
+                               matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
                        {
                                self.context.funding_transaction.take()
                        } else { None };
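
The added parentheses are a real fix, not a style change: in Rust `&&` binds tighter than `||`, so the old condition parsed as `(is_outbound() && awaiting_ready) || channel_ready` rather than the intended `is_outbound() && (awaiting_ready || channel_ready)`. A minimal demonstration:

```rust
fn main() {
    let (is_outbound, awaiting_ready, channel_ready) = (false, false, true);
    // Old, unparenthesized grouping: takes the branch even when !is_outbound.
    assert_eq!(is_outbound && awaiting_ready || channel_ready, true);
    // Intended grouping, as in the fix above.
    assert_eq!(is_outbound && (awaiting_ready || channel_ready), false);
}
```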
@@ -4938,11 +5286,17 @@ impl<SP: Deref> Channel<SP> where
                        }
                }
 
+               let closure_reason = if self.initiated_shutdown() {
+                       ClosureReason::LocallyInitiatedCooperativeClosure
+               } else {
+                       ClosureReason::CounterpartyInitiatedCooperativeClosure
+               };
+
                assert!(self.context.shutdown_scriptpubkey.is_some());
                if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
                        if last_fee == msg.fee_satoshis {
                                let shutdown_result = ShutdownResult {
-                                       closure_reason: ClosureReason::CooperativeClosure,
+                                       closure_reason,
                                        monitor_update: None,
                                        dropped_outbound_htlcs: Vec::new(),
                                        unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
@@ -4977,7 +5331,7 @@ impl<SP: Deref> Channel<SP> where
                                                        .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                                let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
                                                        let shutdown_result = ShutdownResult {
-                                                               closure_reason: ClosureReason::CooperativeClosure,
+                                                               closure_reason,
                                                                monitor_update: None,
                                                                dropped_outbound_htlcs: Vec::new(),
                                                                unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
@@ -5197,7 +5551,7 @@ impl<SP: Deref> Channel<SP> where
                if !self.is_awaiting_monitor_update() { return false; }
                if matches!(
                        self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
-                       if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
+                       if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
                ) {
                        // If we're not a 0conf channel, we'll be waiting on a monitor update with only
                        // AwaitingChannelReady set, though our peer could have sent their channel_ready.
@@ -5242,6 +5596,11 @@ impl<SP: Deref> Channel<SP> where
                self.context.channel_state.is_local_shutdown_sent()
        }
 
+       /// Returns true if we initiated the channel shutdown.
+       pub fn initiated_shutdown(&self) -> bool {
+               self.context.local_initiated_shutdown.is_some()
+       }
+
        /// Returns true if this channel is fully shut down. True here implies that no further actions
        /// may/will be taken on this channel, and thus this object should be freed. Any future changes
        /// will be handled appropriately by the chain monitor.
@@ -5283,14 +5642,14 @@ impl<SP: Deref> Channel<SP> where
 
                // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
                // channel_ready until the entire batch is ready.
-               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
+               let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
                        self.context.channel_state.set_our_channel_ready();
                        true
-               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
                        self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
                        self.context.update_time_counter += 1;
                        true
-               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
+               } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
                        // We got a reorg but not enough to trigger a force close, just ignore.
                        false
                } else {
@@ -5865,7 +6224,7 @@ impl<SP: Deref> Channel<SP> where
                        return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
                }
 
-               let need_holding_cell = self.context.channel_state.should_force_holding_cell();
+               let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
                log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
                        payment_hash, amount_msat,
                        if force_holding_cell { "into holding cell" }
@@ -6156,6 +6515,7 @@ impl<SP: Deref> Channel<SP> where
                // From here on out, we may not fail!
                self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
                self.context.channel_state.set_local_shutdown_sent();
+               self.context.local_initiated_shutdown = Some(());
                self.context.update_time_counter += 1;
 
                let monitor_update = if update_shutdown_script {
@@ -6410,12 +6770,13 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
                                channel_ready_event_emitted: false,
 
                                #[cfg(any(test, fuzzing))]
-                               historical_inbound_htlc_fulfills: HashSet::new(),
+                               historical_inbound_htlc_fulfills: new_hash_set(),
 
                                channel_type,
                                channel_keys_id,
 
                                blocked_monitor_updates: Vec::new(),
+                               local_initiated_shutdown: None,
                        },
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
                })
@@ -7211,11 +7572,13 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
                                channel_ready_event_emitted: false,
 
                                #[cfg(any(test, fuzzing))]
-                               historical_inbound_htlc_fulfills: HashSet::new(),
+                               historical_inbound_htlc_fulfills: new_hash_set(),
 
                                channel_type,
                                channel_keys_id,
 
+                               local_initiated_shutdown: None,
+
                                blocked_monitor_updates: Vec::new(),
                        },
                        unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
@@ -7485,6 +7848,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        let mut channel_state = self.context.channel_state;
                        if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
                                channel_state.set_peer_disconnected();
+                       } else {
+                               debug_assert!(false, "Pre-funded/shutdown channels should not be written");
                        }
                        channel_state.to_u32().write(writer)?;
                }
@@ -7788,6 +8153,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
                        (39, pending_outbound_blinding_points, optional_vec),
                        (41, holding_cell_blinding_points, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
+                       (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
                });
 
                Ok(())
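
`local_initiated_shutdown` is written under odd TLV type 45. LDK's TLV streams follow the BOLT "it's OK to be odd" rule, so readers from releases before 0.0.122 skip the unknown field while newer readers round-trip it. A sketch of that reader rule:

```rust
// Unknown odd TLV types are optional and skipped; unknown even types are
// required and make the read fail. Adding odd type 45 therefore stays
// backwards-compatible.
fn on_unknown_tlv(type_num: u64) -> Result<(), &'static str> {
    if type_num % 2 == 1 {
        Ok(()) // odd: optional, skip the value
    } else {
        Err("unknown even TLV type") // even: required, reject
    }
}

fn main() {
    assert!(on_unknown_tlv(45).is_ok());
    assert!(on_unknown_tlv(44).is_err());
}
```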
@@ -8025,7 +8391,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                let channel_update_status = Readable::read(reader)?;
 
                #[cfg(any(test, fuzzing))]
-               let mut historical_inbound_htlc_fulfills = HashSet::new();
+               let mut historical_inbound_htlc_fulfills = new_hash_set();
                #[cfg(any(test, fuzzing))]
                {
                        let htlc_fulfills_len: u64 = Readable::read(reader)?;
@@ -8075,6 +8441,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
                let mut is_batch_funding: Option<()> = None;
 
+               let mut local_initiated_shutdown: Option<()> = None;
+
                let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
                let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
 
@@ -8109,6 +8477,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                        (39, pending_outbound_blinding_points_opt, optional_vec),
                        (41, holding_cell_blinding_points_opt, optional_vec),
                        (43, malformed_htlcs, optional_vec), // Added in 0.0.119
+                       (45, local_initiated_shutdown, option),
                });
 
                let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -8339,6 +8708,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
                                channel_type: channel_type.unwrap(),
                                channel_keys_id,
 
+                               local_initiated_shutdown,
+
                                blocked_monitor_updates: blocked_monitor_updates.unwrap(),
                        }
                })
@@ -8386,6 +8757,18 @@ mod tests {
        use bitcoin::address::{WitnessProgram, WitnessVersion};
        use crate::prelude::*;
 
+       #[test]
+       fn test_channel_state_order() {
+               use crate::ln::channel::NegotiatingFundingFlags;
+               use crate::ln::channel::AwaitingChannelReadyFlags;
+               use crate::ln::channel::ChannelReadyFlags;
+
+               assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
+               assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
+               assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
+               assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
+       }
+
        struct TestFeeEstimator {
                fee_est: u32
        }
@@ -8893,17 +9276,34 @@ mod tests {
        fn blinding_point_skimmed_fee_malformed_ser() {
                // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
                // properly.
+               let logger = test_utils::TestLogger::new();
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
+               let best_block = BestBlock::from_network(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
-               let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
-               let mut chan = Channel { context: outbound_chan.context };
+               let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+               ).unwrap();
+               let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
+                       &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
+                       &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
+               ).unwrap();
+               outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
+               let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                       value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
+               }]};
+               let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
+               let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
+               let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
+                       Ok((chan, _, _)) => chan,
+                       Err((_, e)) => panic!("{}", e),
+               };
 
                let dummy_htlc_source = HTLCSource::OutboundRoute {
                        path: Path {