Merge pull request #3125 from valentinewallace/2024-06-async-payments-prefactor
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7bd395f999aa30da89dbdd1521dfbf8a864d7c3f..6ba7396ebfe04262904640c6b041d6969e95195f 100644
@@ -21,7 +21,7 @@ use bitcoin::blockdata::block::Header;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::key::constants::SECRET_KEY_SIZE;
-use bitcoin::network::constants::Network;
+use bitcoin::network::Network;
 
 use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
@@ -31,8 +31,9 @@ use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::{secp256k1, Sequence};
 
-use crate::blinded_path::BlindedPath;
-use crate::blinded_path::payment::{PaymentConstraints, ReceiveTlvs};
+use crate::blinded_path::{BlindedPath, NodeIdLookUp};
+use crate::blinded_path::message::ForwardNode;
+use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, ReceiveTlvs};
 use crate::chain;
 use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -42,9 +43,10 @@ use crate::events;
 use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
-use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
+use crate::ln::inbound_payment;
+use crate::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
-pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
+use crate::ln::channel_state::ChannelDetails;
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
@@ -56,19 +58,18 @@ use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
 use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 #[cfg(test)]
 use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{Bolt12PaymentError, OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment, SendAlongPathArgs, StaleExpiration};
 use crate::ln::wire::Encode;
 use crate::offers::invoice::{BlindedPayInfo, Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
 use crate::offers::invoice_error::InvoiceError;
 use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder};
-use crate::offers::merkle::SignError;
 use crate::offers::offer::{Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
 use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
 use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
-use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
+use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
@@ -76,6 +77,7 @@ use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
+
 #[cfg(not(c_bindings))]
 use {
        crate::offers::offer::DerivedMetadata,
@@ -103,7 +105,7 @@ use core::time::Duration;
 use core::ops::Deref;
 
 // Re-export this for use in the public API.
-pub use crate::ln::outbound_payment::{PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
+pub use crate::ln::outbound_payment::{Bolt12PaymentError, PaymentSendFailure, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
 use crate::ln::script::ShutdownScript;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
@@ -156,6 +158,11 @@ pub enum PendingHTLCRouting {
                /// [`Event::PaymentClaimable::onion_fields`] as
                /// [`RecipientOnionFields::payment_metadata`].
                payment_metadata: Option<Vec<u8>>,
+               /// The context of the payment included by the recipient in a blinded path, or `None` if a
+               /// blinded path was not used.
+               ///
+               /// Used in part to determine the [`events::PaymentPurpose`].
+               payment_context: Option<PaymentContext>,
                /// CLTV expiry of the received HTLC.
                ///
                /// Used to track when we should expire pending HTLCs that go unclaimed.
@@ -200,6 +207,8 @@ pub enum PendingHTLCRouting {
                /// For HTLCs received by LDK, these will ultimately bubble back up as
                /// [`RecipientOnionFields::custom_tlvs`].
                custom_tlvs: Vec<(u64, Vec<u8>)>,
+               /// Set if this HTLC is the final hop in a multi-hop blinded path.
+               requires_blinded_error: bool,
        },
 }
 
@@ -221,6 +230,7 @@ impl PendingHTLCRouting {
                match self {
                        Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
                        Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+                       Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
                        _ => None,
                }
        }
@@ -626,7 +636,7 @@ impl MsgHandleErrInternal {
                                        err: msg,
                                        action: msgs::ErrorAction::IgnoreError,
                                },
-                               ChannelError::Close(msg) => LightningError {
+                               ChannelError::Close((msg, _reason)) => LightningError {
                                        err: msg.clone(),
                                        action: msgs::ErrorAction::SendErrorMessage {
                                                msg: msgs::ErrorMessage {
@@ -671,6 +681,7 @@ struct ClaimingPayment {
        receiver_node_id: PublicKey,
        htlcs: Vec<events::ClaimedHTLC>,
        sender_intended_value: Option<u64>,
+       onion_fields: Option<RecipientOnionFields>,
 }
 impl_writeable_tlv_based!(ClaimingPayment, {
        (0, amount_msat, required),
@@ -678,6 +689,7 @@ impl_writeable_tlv_based!(ClaimingPayment, {
        (4, receiver_node_id, required),
        (5, htlcs, optional_vec),
        (7, sender_intended_value, option),
+       (9, onion_fields, option),
 });
 
 struct ClaimablePayment {
@@ -900,7 +912,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
       /// [`ChannelMessageHandler::peer_disconnected`]).
-       is_connected: bool,
+       pub is_connected: bool,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -915,9 +927,9 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                        match phase {
                                ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
                                ChannelPhase::UnfundedInboundV1(_) => false,
-                               #[cfg(dual_funding)]
+                               #[cfg(any(dual_funding, splicing))]
                                ChannelPhase::UnfundedOutboundV2(_) => true,
-                               #[cfg(dual_funding)]
+                               #[cfg(any(dual_funding, splicing))]
                                ChannelPhase::UnfundedInboundV2(_) => false,
                        }
                )
@@ -950,6 +962,11 @@ pub(super) struct InboundChannelRequest {
 /// accepted. An unaccepted channel that exceeds this limit will be abandoned.
 const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
 
+/// The number of blocks of historical feerate estimates we keep around and consider when deciding
+/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
+/// after startup before we consider force-closing channels for having too-low fees.
+pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
+
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
 ///
@@ -1052,8 +1069,8 @@ pub trait AChannelManager {
        type NodeSigner: NodeSigner + ?Sized;
        /// A type that may be dereferenced to [`Self::NodeSigner`].
        type NS: Deref<Target = Self::NodeSigner>;
-       /// A type implementing [`WriteableEcdsaChannelSigner`].
-       type Signer: WriteableEcdsaChannelSigner + Sized;
+       /// A type implementing [`EcdsaChannelSigner`].
+       type Signer: EcdsaChannelSigner + Sized;
        /// A type implementing [`SignerProvider`] for [`Self::Signer`].
        type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
        /// A type that may be dereferenced to [`Self::SignerProvider`].
@@ -1106,11 +1123,631 @@ where
        fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
 }
 
-/// Manager which keeps track of a number of channels and sends messages to the appropriate
-/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+/// A lightning node's channel state machine and payment management logic, which facilitates
+/// sending, forwarding, and receiving payments through lightning channels.
+///
+/// [`ChannelManager`] is parameterized by a number of components to achieve this.
+/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
+///   channel
+/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
+///   closing channels
+/// - [`EntropySource`] for providing random data needed for cryptographic operations
+/// - [`NodeSigner`] for cryptographic operations scoped to the node
+/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
+/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
+///   timely manner
+/// - [`Router`] for finding payment paths when initiating and retrying payments
+/// - [`Logger`] for logging operational information of varying degrees
+///
+/// Additionally, it implements the following traits:
+/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
+/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
+/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
+/// - [`EventsProvider`] to generate user-actionable [`Event`]s
+/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
+///
+/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
+/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
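+///
+/// For example, a [`MessageHandler`] can be built directly from a reference to it (a minimal
+/// sketch: the gossip and custom-message slots are stubbed out with `IgnoringMessageHandler`,
+/// whereas a real node would typically plug in a `P2PGossipSync` and an [`OnionMessenger`]):
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler};
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let message_handler = MessageHandler {
+///     chan_handler: channel_manager,
+///     route_handler: IgnoringMessageHandler {},
+///     onion_message_handler: IgnoringMessageHandler {},
+///     custom_message_handler: IgnoringMessageHandler {},
+/// };
+/// // `message_handler` can now parameterize a `PeerManager`.
+/// # let _ = message_handler;
+/// # }
+/// ```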
+///
+/// # `ChannelManager` vs `ChannelMonitor`
+///
+/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
+/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
+/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
+/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
+/// [`chain::Watch`] of them.
+///
+/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
+/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
+/// for any pertinent on-chain activity, enforcing claims as needed.
+///
+/// This division of off-chain management and on-chain enforcement allows for interesting node
+/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
+/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
+///
+/// # Initialization
+///
+/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
+/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
+/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
+/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
+/// detailed in the [`ChannelManagerReadArgs`] documentation.
+///
+/// ```
+/// use bitcoin::BlockHash;
+/// use bitcoin::network::Network;
+/// use lightning::chain::BestBlock;
+/// # use lightning::chain::channelmonitor::ChannelMonitor;
+/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
+/// # use lightning::routing::gossip::NetworkGraph;
+/// use lightning::util::config::UserConfig;
+/// use lightning::util::ser::ReadableArgs;
+///
+/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
+/// # fn example<
+/// #     'a,
+/// #     L: lightning::util::logger::Logger,
+/// #     ES: lightning::sign::EntropySource,
+/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
+/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
+/// #     SP: Sized,
+/// #     R: lightning::io::Read,
+/// # >(
+/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
+/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
+/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
+/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
+/// #     logger: &L,
+/// #     entropy_source: &ES,
+/// #     node_signer: &dyn lightning::sign::NodeSigner,
+/// #     signer_provider: &lightning::sign::DynSignerProvider,
+/// #     best_block: lightning::chain::BestBlock,
+/// #     current_timestamp: u32,
+/// #     mut reader: R,
+/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
+/// // Fresh start with no channels
+/// let params = ChainParameters {
+///     network: Network::Bitcoin,
+///     best_block,
+/// };
+/// let default_config = UserConfig::default();
+/// let channel_manager = ChannelManager::new(
+///     fee_estimator, chain_monitor, tx_broadcaster, router, logger, entropy_source, node_signer,
+///     signer_provider, default_config, params, current_timestamp
+/// );
+///
+/// // Restart from deserialized data
+/// let mut channel_monitors = read_channel_monitors();
+/// let args = ChannelManagerReadArgs::new(
+///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
+///     router, logger, default_config, channel_monitors.iter_mut().collect()
+/// );
+/// let (block_hash, channel_manager) =
+///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
+///
+/// // Update the ChannelManager and ChannelMonitors with the latest chain data
+/// // ...
+///
+/// // Move the monitors to the ChannelManager's chain::Watch parameter
+/// for monitor in channel_monitors {
+///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// }
+/// # Ok(())
+/// # }
+/// ```
+///
+/// # Operation
+///
+/// The following is required for [`ChannelManager`] to function properly:
+/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
+///   called by [`PeerManager::read_event`] when processing network I/O)
+/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
+///   (typically initiated when [`PeerManager::process_events`] is called)
+/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
+///   as documented by those traits
+/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
+///   every minute
+/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
+///   [`Persister`] such as a [`KVStore`] implementation
+/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
+///
+/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
+/// when the last two requirements need to be checked.
+///
+/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
+/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
+/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
+/// crate. For languages other than Rust, the availability of similar utilities may vary.
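+///
+/// A minimal single-threaded sketch of such a loop (illustrative only: chain data, network I/O,
+/// and the actual persistence mechanism are elided, and production nodes typically rely on
+/// [`lightning-background-processor`] instead):
+///
+/// ```
+/// # use lightning::events::EventsProvider;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let keep_running = false;
+/// loop {
+///     // ... feed in any new chain data and peer messages here ...
+///
+///     // Roughly once a minute:
+///     channel_manager.timer_tick_occurred();
+///
+///     channel_manager.process_pending_events(&|event| match event {
+///         // ... handle each event ...
+/// #       _ => {},
+///     });
+///
+///     if channel_manager.get_and_clear_needs_persistence() {
+///         // ... write the `ChannelManager` out using its `Writeable` implementation ...
+///     }
+/// # if !keep_running { break; }
+/// }
+/// # }
+/// ```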
+///
+/// # Channels
+///
+/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
+/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
+/// currently open channels.
+///
+/// ```
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let channels = channel_manager.list_usable_channels();
+/// for details in channels {
+///     println!("{:?}", details);
+/// }
+/// # }
+/// ```
+///
+/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
+/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
+/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
+/// by [`ChannelManager`].
+///
+/// ## Opening Channels
+///
+/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
+/// opening an outbound channel, which requires self-funding when handling
+/// [`Event::FundingGenerationReady`].
+///
+/// ```
+/// # use bitcoin::{ScriptBuf, Transaction};
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # trait Wallet {
+/// #     fn create_funding_transaction(
+/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
+/// #     ) -> Transaction;
+/// # }
+/// #
+/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let value_sats = 1_000_000;
+/// let push_msats = 10_000_000;
+/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
+///     Ok(channel_id) => println!("Opening channel {}", channel_id),
+///     Err(e) => println!("Error opening channel: {:?}", e),
+/// }
+///
+/// // On the event processing thread once the peer has responded
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::FundingGenerationReady {
+///         temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
+///         user_channel_id, ..
+///     } => {
+///         assert_eq!(user_channel_id, 42);
+///         let funding_transaction = wallet.create_funding_transaction(
+///             channel_value_satoshis, output_script
+///         );
+///         match channel_manager.funding_transaction_generated(
+///             &temporary_channel_id, &counterparty_node_id, funding_transaction
+///         ) {
+///             Ok(()) => println!("Funding channel {}", temporary_channel_id),
+///             Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
+///         }
+///     },
+///     Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!(
+///             "Channel {} (formerly {}) pending; funding transaction has been broadcast",
+///             channel_id, former_temporary_channel_id.unwrap()
+///         );
+///     },
+///     Event::ChannelReady { channel_id, user_channel_id, .. } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!("Channel {} ready", channel_id);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Accepting Channels
+///
+/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
+/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
+/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
+/// #     // ...
+/// #     unimplemented!()
+/// # }
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let error_message = "Channel force-closed";
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
+///         if !is_trusted(counterparty_node_id) {
+///             match channel_manager.force_close_without_broadcasting_txn(
+///                 &temporary_channel_id, &counterparty_node_id, error_message.to_string()
+///             ) {
+///                 Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
+///                 Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
+///             }
+///             return;
+///         }
+///
+///         let user_channel_id = 43;
+///         match channel_manager.accept_inbound_channel(
+///             &temporary_channel_id, &counterparty_node_id, user_channel_id
+///         ) {
+///             Ok(()) => println!("Accepting channel {}", temporary_channel_id),
+///             Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
+///         }
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## Closing Channels
+///
+/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
+/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
+/// lower fees and immediate access to funds. However, the latter may be necessary if the
+/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
+/// once the channel has been closed successfully.
+///
+/// ```
+/// # use bitcoin::secp256k1::PublicKey;
+/// # use lightning::ln::types::ChannelId;
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::events::{Event, EventsProvider};
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
+///     Ok(()) => println!("Closing channel {}", channel_id),
+///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
+/// }
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::ChannelClosed { channel_id, user_channel_id, ..  } => {
+///         assert_eq!(user_channel_id, 42);
+///         println!("Channel {} closed", channel_id);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Payments
+///
+/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
+/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
+/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
+/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
+/// from the [`PaymentSecret`]. Sending payments, however, require tracking in order to retry failed
+/// HTLCs.
+///
+/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
+/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
+/// for a payment will be retried according to the payment's [`Retry`] strategy or until
+/// [`abandon_payment`] is called.
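+///
+/// For example, to give up on a payment that keeps failing (a sketch; `payment_id` is assumed to
+/// be the id that was supplied when the payment was initiated):
+///
+/// ```
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId};
+/// # fn example<T: AChannelManager>(channel_manager: T, payment_id: PaymentId) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Stop retrying; once no HTLCs for the payment remain in-flight, an
+/// // Event::PaymentFailed will be generated.
+/// channel_manager.abandon_payment(payment_id);
+/// # }
+/// ```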
+///
+/// ## BOLT 11 Invoices
+///
+/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. Specifically, use the
+/// functions in its `utils` module for constructing invoices that are compatible with
+/// [`ChannelManager`]. These functions serve as a convenience for building invoices with the
+/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
+/// own [`PaymentHash`], use [`create_inbound_payment_for_hash`] or the corresponding functions in
+/// the [`lightning-invoice`] `utils` module.
+///
+/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
+/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
+/// an [`Event::PaymentClaimed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // Or use utils::create_invoice_from_channelmanager
+/// let known_payment_hash = match channel_manager.create_inbound_payment(
+///     Some(10_000_000), 3600, None
+/// ) {
+///     Ok((payment_hash, _payment_secret)) => {
+///         println!("Creating inbound payment {}", payment_hash);
+///         payment_hash
+///     },
+///     Err(()) => panic!("Error creating inbound payment"),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
+///             assert_eq!(payment_hash, known_payment_hash);
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         PaymentPurpose::SpontaneousPayment(payment_preimage) => {
+///             assert_ne!(payment_hash, known_payment_hash);
+///             println!("Claiming spontaneous payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         assert_eq!(payment_hash, known_payment_hash);
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// For paying an invoice, [`lightning-invoice`] provides a `payment` module with convenience
+/// functions for use with [`send_payment`].
 ///
-/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
-/// to individual Channels.
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::types::PaymentHash;
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
+/// # use lightning::routing::router::RouteParameters;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+/// #     route_params: RouteParameters, retry: Retry
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// // let (payment_hash, recipient_onion, route_params) =
+/// //     payment::payment_parameters_from_invoice(&invoice);
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.send_payment(
+///     payment_hash, recipient_onion, payment_id, route_params, retry
+/// ) {
+///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
+///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
+/// }
+///
+/// let expected_payment_id = payment_id;
+/// let expected_payment_hash = payment_hash;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending { payment_id, payment_hash, .. }
+///             if *payment_id == expected_payment_id && *payment_hash == expected_payment_hash
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
+///     Event::PaymentFailed { payment_hash, .. } => println!("Failed paying {}", payment_hash),
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Offers
+///
+/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
+/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
+/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
+/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
+/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
+/// stateless just as BOLT 11 invoices are.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// # let absolute_expiry = None;
+/// let offer = channel_manager
+///     .create_offer_builder(absolute_expiry)?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
+/// # let offer = builder
+///     .description("coffee".to_string())
+///     .amount_msats(10_000_000)
+///     .build()?;
+/// let bech32_offer = offer.to_string();
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
+/// and pays the [`Bolt12Invoice`] response. In addition to success and failure events,
+/// [`ChannelManager`] may also generate an [`Event::InvoiceRequestFailed`].
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::offer::Offer;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
+/// #     payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
+/// # ) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// match channel_manager.pay_for_offer(
+///     offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
+/// ) {
+///     Ok(()) => println!("Requesting invoice for offer"),
+///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
+/// }
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::AwaitingInvoice { payment_id }
+///             if *payment_id == expected_payment_id
+///     )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending { payment_id, .. }
+///             if *payment_id == expected_payment_id
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     Event::InvoiceRequestFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// ## BOLT 12 Refunds
+///
+/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
+/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
+/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
+/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
+///
+/// ```
+/// # use core::time::Duration;
+/// # use lightning::events::{Event, EventsProvider};
+/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
+/// # use lightning::offers::parse::Bolt12SemanticError;
+/// #
+/// # fn example<T: AChannelManager>(
+/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
+/// #     max_total_routing_fee_msat: Option<u64>
+/// # ) -> Result<(), Bolt12SemanticError> {
+/// # let channel_manager = channel_manager.get_cm();
+/// let payment_id = PaymentId([42; 32]);
+/// let refund = channel_manager
+///     .create_refund_builder(
+///         amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat
+///     )?
+/// # ;
+/// # // Needed for compiling for c_bindings
+/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
+/// # let refund = builder
+///     .description("coffee".to_string())
+///     .payer_note("refund for order 1234".to_string())
+///     .build()?;
+/// let bech32_refund = refund.to_string();
+///
+/// // First the payment will be waiting on an invoice
+/// let expected_payment_id = payment_id;
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::AwaitingInvoice { payment_id }
+///             if *payment_id == expected_payment_id
+///     )).is_some()
+/// );
+///
+/// // Once the invoice is received, a payment will be sent
+/// assert!(
+///     channel_manager.list_recent_payments().iter().find(|details| matches!(
+///         details,
+///         RecentPaymentDetails::Pending { payment_id, .. }
+///             if *payment_id == expected_payment_id
+///     )).is_some()
+/// );
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
+///     Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
+///     // ...
+/// #     _ => {},
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
+/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
+///
+/// ```
+/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
+/// # use lightning::ln::channelmanager::AChannelManager;
+/// # use lightning::offers::refund::Refund;
+/// #
+/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
+/// # let channel_manager = channel_manager.get_cm();
+/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
+///     Ok(invoice) => {
+///         let payment_hash = invoice.payment_hash();
+///         println!("Requesting refund payment {}", payment_hash);
+///         payment_hash
+///     },
+///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
+/// };
+///
+/// // On the event processing thread
+/// channel_manager.process_pending_events(&|event| match event {
+///     Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
+///             assert_eq!(payment_hash, known_payment_hash);
+///             println!("Claiming payment {}", payment_hash);
+///             channel_manager.claim_funds(payment_preimage);
+///         },
+///         PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
+///             println!("Unknown payment hash: {}", payment_hash);
+///         },
+///         // ...
+/// #         _ => {},
+///     },
+///     Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
+///         assert_eq!(payment_hash, known_payment_hash);
+///         println!("Claimed {} msats", amount_msat);
+///     },
+///     // ...
+/// #     _ => {},
+/// });
+/// # }
+/// ```
+///
+/// # Persistence
 ///
 /// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
 /// all peers during write/read (though does not modify this instance, only the instance being
@@ -1131,12 +1768,16 @@ where
 /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
 /// See [`chain::Listen`] and [`chain::Confirm`] for more details.
 ///
+/// # `ChannelUpdate` Messages
+///
 /// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
 /// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
 /// offline for a full minute. In order to track this, you must call
 /// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
 ///
+/// # DoS Mitigation
+///
 /// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
 /// inbound channels without confirmed funding transactions. This may result in nodes which we do
 /// not have a channel with being unable to connect to us or open new channels with us if we have
@@ -1146,19 +1787,53 @@ where
 /// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
 /// never limited. Please ensure you limit the count of such channels yourself.
 ///
+/// # Type Aliases
+///
 /// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
 /// a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
 /// essentially you should default to using a [`SimpleRefChannelManager`], and use a
 /// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
 /// you're using lightning-net-tokio.
 ///
+/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
+/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
+/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
+/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
+/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+/// [`timer_tick_occurred`]: Self::timer_tick_occurred
+/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
+/// [`Persister`]: crate::util::persist::Persister
+/// [`KVStore`]: crate::util::persist::KVStore
+/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
+/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
+/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
+/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
+/// [`list_channels`]: Self::list_channels
+/// [`list_usable_channels`]: Self::list_usable_channels
+/// [`create_channel`]: Self::create_channel
+/// [`close_channel`]: Self::close_channel
+/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
+/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
+/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
+/// [`list_recent_payments`]: Self::list_recent_payments
+/// [`abandon_payment`]: Self::abandon_payment
+/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
+/// [`create_inbound_payment`]: Self::create_inbound_payment
+/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+/// [`claim_funds`]: Self::claim_funds
+/// [`send_payment`]: Self::send_payment
+/// [`offers`]: crate::offers
+/// [`create_offer_builder`]: Self::create_offer_builder
+/// [`pay_for_offer`]: Self::pay_for_offer
+/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
+/// [`create_refund_builder`]: Self::create_refund_builder
+/// [`request_refund_payment`]: Self::request_refund_payment
 /// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
 /// [`funding_created`]: msgs::FundingCreated
 /// [`funding_transaction_generated`]: Self::funding_transaction_generated
 /// [`BlockHash`]: bitcoin::hash_types::BlockHash
 /// [`update_channel`]: chain::Watch::update_channel
 /// [`ChannelUpdate`]: msgs::ChannelUpdate
-/// [`timer_tick_occurred`]: Self::timer_tick_occurred
 /// [`read`]: ReadableArgs::read
 //
 // Lock order:
@@ -1178,6 +1853,8 @@ where
 //  |   |
 //  |   |__`pending_intercepted_htlcs`
 //  |
+//  |__`decode_update_add_htlcs`
+//  |
 //  |__`per_peer_state`
 //      |
 //      |__`pending_inbound_payments`
@@ -1268,6 +1945,18 @@ where
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
 
+       /// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
+       ///
+       /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+       /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+       /// and via the classic SCID.
+       ///
+       /// Note that no consistency guarantees are made about the existence of a channel with the
+       /// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
+
        /// The sets of payments which are claimable or currently being claimed. See
        /// [`ClaimablePayments`]' individual field docs for more info.
        ///
@@ -1411,6 +2100,24 @@ where
 
        pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
 
+       /// Tracks the message events that are to be broadcast when we are connected to some peer.
+       pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
+
+       /// We only want to force-close our channels on peers based on stale feerates when we're
+       /// confident the feerate on the channel is *really* stale, not one that just became stale.
+       /// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
+       /// (after startup completed) here, and only force-close when channels have a lower feerate
+       /// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
+       ///
+       /// We only keep this in memory as we assume any feerates we receive immediately after startup
+       /// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
+       /// actions for a day anyway.
+       ///
+       /// The first element in the pair is the
+       /// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
+       /// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
+       last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
+
        entropy_source: ES,
        node_signer: NS,
        signer_provider: SP,
@@ -1601,337 +2308,18 @@ const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
 
-/// Information needed for constructing an invoice route hint for this channel.
-#[derive(Clone, Debug, PartialEq)]
-pub struct CounterpartyForwardingInfo {
-       /// Base routing fee in millisatoshis.
-       pub fee_base_msat: u32,
-       /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
-       pub fee_proportional_millionths: u32,
-       /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart,
-       /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
-       /// `cltv_expiry_delta` for more details.
-       pub cltv_expiry_delta: u16,
-}
-
-/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
-/// to better separate parameters.
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelCounterparty {
-       /// The node_id of our counterparty
-       pub node_id: PublicKey,
-       /// The Features the channel counterparty provided upon last connection.
-       /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
-       /// many routing-relevant features are present in the init context.
-       pub features: InitFeatures,
-       /// The value, in satoshis, that must always be held in the channel for our counterparty. This
-       /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
-       /// claiming at least this value on chain.
-       ///
-       /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
-       ///
-       /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
-       pub unspendable_punishment_reserve: u64,
-       /// Information on the fees and requirements that the counterparty requires when forwarding
-       /// payments to us through this channel.
-       pub forwarding_info: Option<CounterpartyForwardingInfo>,
-       /// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field
-       /// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message
-       /// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107.
-       pub outbound_htlc_minimum_msat: Option<u64>,
-       /// The largest value HTLC (in msat) the remote peer currently will accept, for this channel.
-       pub outbound_htlc_maximum_msat: Option<u64>,
-}
-
-/// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
-#[derive(Clone, Debug, PartialEq)]
-pub struct ChannelDetails {
-       /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
-       /// thereafter this is the txid of the funding transaction xor the funding transaction output).
-       /// Note that this means this value is *not* persistent - it can change once during the
-       /// lifetime of the channel.
-       pub channel_id: ChannelId,
-       /// Parameters which apply to our counterparty. See individual fields for more information.
-       pub counterparty: ChannelCounterparty,
-       /// The Channel's funding transaction output, if we've negotiated the funding transaction with
-       /// our counterparty already.
-       pub funding_txo: Option<OutPoint>,
-       /// The features which this channel operates with. See individual features for more info.
-       ///
-       /// `None` until negotiation completes and the channel type is finalized.
-       pub channel_type: Option<ChannelTypeFeatures>,
-       /// The position of the funding transaction in the chain. None if the funding transaction has
-       /// not yet been confirmed and the channel fully opened.
-       ///
-       /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
-       /// payments instead of this. See [`get_inbound_payment_scid`].
-       ///
-       /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
-       /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
-       ///
-       /// [`inbound_scid_alias`]: Self::inbound_scid_alias
-       /// [`outbound_scid_alias`]: Self::outbound_scid_alias
-       /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
-       /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
-       /// [`confirmations_required`]: Self::confirmations_required
-       pub short_channel_id: Option<u64>,
-       /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
-       /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
-       /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
-       /// `Some(0)`).
-       ///
-       /// This will be `None` as long as the channel is not available for routing outbound payments.
-       ///
-       /// [`short_channel_id`]: Self::short_channel_id
-       /// [`confirmations_required`]: Self::confirmations_required
-       pub outbound_scid_alias: Option<u64>,
-       /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
-       /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
-       /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
-       /// when they see a payment to be routed to us.
-       ///
-       /// Our counterparty may choose to rotate this value at any time, though will always recognize
-       /// previous values for inbound payment forwarding.
-       ///
-       /// [`short_channel_id`]: Self::short_channel_id
-       pub inbound_scid_alias: Option<u64>,
-       /// The value, in satoshis, of this channel as appears in the funding output
-       pub channel_value_satoshis: u64,
-       /// The value, in satoshis, that must always be held in the channel for us. This value ensures
-       /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
-       /// this value on chain.
-       ///
-       /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
-       ///
-       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
-       ///
-       /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
-       pub unspendable_punishment_reserve: Option<u64>,
-       /// The `user_channel_id` value passed in to [`ChannelManager::create_channel`] for outbound
-       /// channels, or to [`ChannelManager::accept_inbound_channel`] for inbound channels if
-       /// [`UserConfig::manually_accept_inbound_channels`] config flag is set to true. Otherwise
-       /// `user_channel_id` will be randomized for an inbound channel.  This may be zero for objects
-       /// serialized with LDK versions prior to 0.0.113.
-       ///
-       /// [`ChannelManager::create_channel`]: crate::ln::channelmanager::ChannelManager::create_channel
-       /// [`ChannelManager::accept_inbound_channel`]: crate::ln::channelmanager::ChannelManager::accept_inbound_channel
-       /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
-       pub user_channel_id: u128,
-       /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
-       /// which is applied to commitment and HTLC transactions.
-       ///
-       /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
-       pub feerate_sat_per_1000_weight: Option<u32>,
-       /// Our total balance.  This is the amount we would get if we close the channel.
-       /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
-       /// amount is not likely to be recoverable on close.
-       ///
-       /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
-       /// balance is not available for inclusion in new outbound HTLCs). This further does not include
-       /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
-       /// This does not consider any on-chain fees.
-       ///
-       /// See also [`ChannelDetails::outbound_capacity_msat`]
-       pub balance_msat: u64,
-       /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
-       /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
-       /// available for inclusion in new outbound HTLCs). This further does not include any pending
-       /// outgoing HTLCs which are awaiting some other resolution to be sent.
-       ///
-       /// See also [`ChannelDetails::balance_msat`]
-       ///
-       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
-       /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
-       /// should be able to spend nearly this amount.
-       pub outbound_capacity_msat: u64,
-       /// The available outbound capacity for sending a single HTLC to the remote peer. This is
-       /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
-       /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
-       /// to use a limit as close as possible to the HTLC limit we can currently send.
-       ///
-       /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
-       /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
-       pub next_outbound_htlc_limit_msat: u64,
-       /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
-       /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
-       /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
-       /// route which is valid.
-       pub next_outbound_htlc_minimum_msat: u64,
-       /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
-       /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
-       /// available for inclusion in new inbound HTLCs).
-       /// Note that there are some corner cases not fully handled here, so the actual available
-       /// inbound capacity may be slightly higher than this.
-       ///
-       /// This value is not exact. Due to various in-flight changes, feerate changes, and our
-       /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
-       /// However, our counterparty should be able to spend nearly this amount.
-       pub inbound_capacity_msat: u64,
-       /// The number of required confirmations on the funding transaction before the funding will be
-       /// considered "locked". This number is selected by the channel fundee (i.e. us if
-       /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
-       /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
-       /// [`ChannelHandshakeLimits::max_minimum_depth`].
-       ///
-       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
-       ///
-       /// [`is_outbound`]: ChannelDetails::is_outbound
-       /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
-       /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
-       pub confirmations_required: Option<u32>,
-       /// The current number of confirmations on the funding transaction.
-       ///
-       /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
-       pub confirmations: Option<u32>,
-       /// The number of blocks (after our commitment transaction confirms) that we will need to wait
-       /// until we can claim our funds after we force-close the channel. During this time our
-       /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
-       /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
-       /// time to claim our non-HTLC-encumbered funds.
-       ///
-       /// This value will be `None` for outbound channels until the counterparty accepts the channel.
-       pub force_close_spend_delay: Option<u16>,
-       /// True if the channel was initiated (and thus funded) by us.
-       pub is_outbound: bool,
-       /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
-       /// channel is not currently being shut down. `channel_ready` message exchange implies the
-       /// required confirmation count has been reached (and we were connected to the peer at some
-       /// point after the funding transaction received enough confirmations). The required
-       /// confirmation count is provided in [`confirmations_required`].
-       ///
-       /// [`confirmations_required`]: ChannelDetails::confirmations_required
-       pub is_channel_ready: bool,
-       /// The stage of the channel's shutdown.
-       /// `None` for `ChannelDetails` serialized on LDK versions prior to 0.0.116.
-       pub channel_shutdown_state: Option<ChannelShutdownState>,
-       /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
-       /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
-       ///
-       /// This is a strict superset of `is_channel_ready`.
-       pub is_usable: bool,
-       /// True if this channel is (or will be) publicly-announced.
-       pub is_public: bool,
-       /// The smallest value HTLC (in msat) we will accept, for this channel. This field
-       /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107
-       pub inbound_htlc_minimum_msat: Option<u64>,
-       /// The largest value HTLC (in msat) we currently will accept, for this channel.
-       pub inbound_htlc_maximum_msat: Option<u64>,
-       /// Set of configurable parameters that affect channel operation.
-       ///
-       /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
-       pub config: Option<ChannelConfig>,
-       /// Pending inbound HTLCs.
-       ///
-       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
-       pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
-       /// Pending outbound HTLCs.
-       ///
-       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
-       pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
-}
-
-impl ChannelDetails {
-       /// Gets the current SCID which should be used to identify this channel for inbound payments.
-       /// This should be used for providing invoice hints or in any other context where our
-       /// counterparty will forward a payment to us.
-       ///
-       /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
-       /// [`ChannelDetails::short_channel_id`]. See those for more information.
-       pub fn get_inbound_payment_scid(&self) -> Option<u64> {
-               self.inbound_scid_alias.or(self.short_channel_id)
-       }
-
-       /// Gets the current SCID which should be used to identify this channel for outbound payments.
-       /// This should be used in [`Route`]s to describe the first hop or in other contexts where
-       /// we're sending or forwarding a payment outbound over this channel.
-       ///
-       /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
-       /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
-       pub fn get_outbound_payment_scid(&self) -> Option<u64> {
-               self.short_channel_id.or(self.outbound_scid_alias)
-       }
-
-       fn from_channel_context<SP: Deref, F: Deref>(
-               context: &ChannelContext<SP>, best_block_height: u32, latest_features: InitFeatures,
-               fee_estimator: &LowerBoundedFeeEstimator<F>
-       ) -> Self
-       where
-               SP::Target: SignerProvider,
-               F::Target: FeeEstimator
-       {
-               let balance = context.get_available_balances(fee_estimator);
-               let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
-                       context.get_holder_counterparty_selected_channel_reserve_satoshis();
-               ChannelDetails {
-                       channel_id: context.channel_id(),
-                       counterparty: ChannelCounterparty {
-                               node_id: context.get_counterparty_node_id(),
-                               features: latest_features,
-                               unspendable_punishment_reserve: to_remote_reserve_satoshis,
-                               forwarding_info: context.counterparty_forwarding_info(),
-                               // Ensures that we have actually received the `htlc_minimum_msat` value
-                               // from the counterparty through the `OpenChannel` or `AcceptChannel`
-                               // message (as they are always the first message from the counterparty).
-                               // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
-                               // default `0` value set by `Channel::new_outbound`.
-                               outbound_htlc_minimum_msat: if context.have_received_message() {
-                                       Some(context.get_counterparty_htlc_minimum_msat()) } else { None },
-                               outbound_htlc_maximum_msat: context.get_counterparty_htlc_maximum_msat(),
-                       },
-                       funding_txo: context.get_funding_txo(),
-                       // Note that accept_channel (or open_channel) is always the first message, so
-                       // `have_received_message` indicates that type negotiation has completed.
-                       channel_type: if context.have_received_message() { Some(context.get_channel_type().clone()) } else { None },
-                       short_channel_id: context.get_short_channel_id(),
-                       outbound_scid_alias: if context.is_usable() { Some(context.outbound_scid_alias()) } else { None },
-                       inbound_scid_alias: context.latest_inbound_scid_alias(),
-                       channel_value_satoshis: context.get_value_satoshis(),
-                       feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()),
-                       unspendable_punishment_reserve: to_self_reserve_satoshis,
-                       balance_msat: balance.balance_msat,
-                       inbound_capacity_msat: balance.inbound_capacity_msat,
-                       outbound_capacity_msat: balance.outbound_capacity_msat,
-                       next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
-                       next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
-                       user_channel_id: context.get_user_id(),
-                       confirmations_required: context.minimum_depth(),
-                       confirmations: Some(context.get_funding_tx_confirmations(best_block_height)),
-                       force_close_spend_delay: context.get_counterparty_selected_contest_delay(),
-                       is_outbound: context.is_outbound(),
-                       is_channel_ready: context.is_usable(),
-                       is_usable: context.is_live(),
-                       is_public: context.should_announce(),
-                       inbound_htlc_minimum_msat: Some(context.get_holder_htlc_minimum_msat()),
-                       inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
-                       config: Some(context.config()),
-                       channel_shutdown_state: Some(context.shutdown_state()),
-                       pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
-                       pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
-               }
-       }
-}
-
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-/// Further information on the details of the channel shutdown.
-/// Upon channels being forced closed (i.e. commitment transaction confirmation detected
-/// by `ChainMonitor`), ChannelShutdownState will be set to `ShutdownComplete` or
-/// the channel will be removed shortly.
-/// Also note, that in normal operation, peers could disconnect at any of these states
-/// and require peer re-connection before making progress onto other states
-pub enum ChannelShutdownState {
-       /// Channel has not sent or received a shutdown message.
-       NotShuttingDown,
-       /// Local node has sent a shutdown message for this channel.
-       ShutdownInitiated,
-       /// Shutdown message exchanges have concluded and the channels are in the midst of
-       /// resolving all existing open HTLCs before closing can continue.
-       ResolvingHTLCs,
-       /// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates.
-       NegotiatingClosingFee,
-       /// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about
-       /// to drop the channel.
-       ShutdownComplete,
-}
+/// The maximum expiration from the current time for which an [`Offer`] or [`Refund`] is
+/// considered short-lived; anything with a greater expiration is considered long-lived.
+///
+/// An [`Offer`] or [`Refund`] created using [`ChannelManager::create_offer_builder`] or
+/// [`ChannelManager::create_refund_builder`] will include a [`BlindedPath`] created using:
+/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
+/// - [`MessageRouter::create_blinded_paths`] when long-lived.
+///
+/// Using compact [`BlindedPath`]s may provide better privacy as the [`MessageRouter`] could select
+/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
+/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
+pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
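A minimal sketch of how this cutoff could be applied when choosing between path types (the helper `prefers_compact_paths` and its arguments are hypothetical; only the constant and the compact-vs-full distinction come from the code above):

```rust
use core::time::Duration;

const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);

/// Hypothetical helper: an offer expiring at `absolute_expiry` (seconds since
/// the Unix epoch) counts as short-lived, and thus gets compact blinded paths,
/// when its remaining lifetime from `now` is within the cutoff. An offer with
/// no expiry never expires and is treated as long-lived.
fn prefers_compact_paths(absolute_expiry: Option<Duration>, now: Duration) -> bool {
    absolute_expiry
        .map(|expiry| expiry.saturating_sub(now) <= MAX_SHORT_LIVED_RELATIVE_EXPIRY)
        .unwrap_or(false)
}
```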
 
 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
@@ -2002,19 +2390,20 @@ macro_rules! handle_error {
                match $internal {
                        Ok(msg) => Ok(msg),
                        Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
-                               let mut msg_events = Vec::with_capacity(2);
+                               let mut msg_event = None;
 
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
                                        let counterparty_node_id = shutdown_res.counterparty_node_id;
                                        let channel_id = shutdown_res.channel_id;
                                        let logger = WithContext::from(
-                                               &$self.logger, Some(counterparty_node_id), Some(channel_id),
+                                               &$self.logger, Some(counterparty_node_id), Some(channel_id), None
                                        );
                                        log_error!(logger, "Force-closing channel: {}", err.err);
 
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
-                                               msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                               let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
+                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
@@ -2024,17 +2413,17 @@ macro_rules! handle_error {
 
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
-                                       msg_events.push(events::MessageSendEvent::HandleError {
+                                       msg_event = Some(events::MessageSendEvent::HandleError {
                                                node_id: $counterparty_node_id,
                                                action: err.action.clone()
                                        });
                                }
 
-                               if !msg_events.is_empty() {
+                               if let Some(msg_event) = msg_event {
                                        let per_peer_state = $self.per_peer_state.read().unwrap();
                                        if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
                                                let mut peer_state = peer_state_mutex.lock().unwrap();
-                                               peer_state.pending_msg_events.append(&mut msg_events);
+                                               peer_state.pending_msg_events.push(msg_event);
                                        }
                                }
 
@@ -2077,11 +2466,10 @@ macro_rules! convert_chan_phase_err {
                        ChannelError::Ignore(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
                        },
-                       ChannelError::Close(msg) => {
-                               let logger = WithChannelContext::from(&$self.logger, &$channel.context);
+                       ChannelError::Close((msg, reason)) => {
+                               let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
                                log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
-                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
                                let shutdown_res = $channel.context.force_shutdown(true, reason);
                                let err =
                                        MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
@@ -2106,11 +2494,11 @@ macro_rules! convert_chan_phase_err {
                        ChannelPhase::UnfundedInboundV1(channel) => {
                                convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedOutboundV2(channel) => {
                                convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
-                       #[cfg(dual_funding)]
+                       #[cfg(any(dual_funding, splicing))]
                        ChannelPhase::UnfundedInboundV2(channel) => {
                                convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
                        },
@@ -2213,7 +2601,7 @@ macro_rules! emit_channel_ready_event {
 
 macro_rules! handle_monitor_update_completion {
        ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
-               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
                let mut updates = $chan.monitor_updating_restored(&&logger,
                        &$self.node_signer, $self.chain_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height);
@@ -2235,9 +2623,9 @@ macro_rules! handle_monitor_update_completion {
                let update_actions = $peer_state.monitor_update_blocked_actions
                        .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
 
-               let htlc_forwards = $self.handle_channel_resumption(
+               let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
                        &mut $peer_state.pending_msg_events, $chan, updates.raa,
-                       updates.commitment_update, updates.order, updates.accepted_htlcs,
+                       updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
                        updates.funding_broadcastable, updates.channel_ready,
                        updates.announcement_sigs);
                if let Some(upd) = channel_update {
@@ -2298,6 +2686,9 @@ macro_rules! handle_monitor_update_completion {
                if let Some(forwards) = htlc_forwards {
                        $self.forward_htlcs(&mut [forwards][..]);
                }
+               if let Some(decode) = decode_update_add_htlcs {
+                       $self.push_decode_update_add_htlcs(decode);
+               }
                $self.finalize_claims(updates.finalized_claimed_htlcs);
                for failure in updates.failed_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
@@ -2309,7 +2700,7 @@ macro_rules! handle_monitor_update_completion {
 macro_rules! handle_new_monitor_update {
        ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
-               let logger = WithChannelContext::from(&$self.logger, &$chan.context);
+               let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
                match $update_res {
                        ChannelMonitorUpdateStatus::UnrecoverableError => {
                                let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
@@ -2474,6 +2865,7 @@ where
                        pending_inbound_payments: Mutex::new(new_hash_map()),
                        pending_outbound_payments: OutboundPayments::new(),
                        forward_htlcs: Mutex::new(new_hash_map()),
+                       decode_update_add_htlcs: Mutex::new(new_hash_map()),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
                        pending_intercepted_htlcs: Mutex::new(new_hash_map()),
                        outpoint_to_peer: Mutex::new(new_hash_map()),
@@ -2501,6 +2893,9 @@ where
                        funding_batch_states: Mutex::new(BTreeMap::new()),
 
                        pending_offers_messages: Mutex::new(Vec::new()),
+                       pending_broadcast_messages: Mutex::new(Vec::new()),
+
+                       last_days_feerates: Mutex::new(VecDeque::new()),
 
                        entropy_source,
                        node_signer,
@@ -2793,7 +3188,7 @@ where
                                                }
                                        } else {
                                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
@@ -2888,7 +3283,7 @@ where
                }
 
                let logger = WithContext::from(
-                       &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
+                       &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
                );
 
                log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
@@ -2962,9 +3357,9 @@ where
                        let closure_reason = if let Some(peer_msg) = peer_msg {
                                ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
                        } else {
-                               ClosureReason::HolderForceClosed
+                               ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
                        };
-                       let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
+                       let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                log_error!(logger, "Force-closing channel {}", channel_id);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
@@ -2980,8 +3375,8 @@ where
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
-                                       // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-                                       #[cfg(dual_funding)]
+                                       // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+                                       #[cfg(any(dual_funding, splicing))]
                                        ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
                                                self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
                                                // Unfunded channel has no update
@@ -2999,24 +3394,21 @@ where
                        }
                };
                if let Some(update) = update_opt {
-                       // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
-                       // not try to broadcast it via whatever peer we have.
-                       let per_peer_state = self.per_peer_state.read().unwrap();
-                       let a_peer_state_opt = per_peer_state.get(peer_node_id)
-                               .ok_or(per_peer_state.values().next());
-                       if let Ok(a_peer_state_mutex) = a_peer_state_opt {
-                               let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
-                               a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                       msg: update
-                               });
-                       }
+                       // If we have a `ChannelUpdate` to broadcast, cache it now and broadcast it later.
+                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                               msg: update
+                       });
                }
 
                Ok(counterparty_node_id)
        }
 
-       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
+       -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               log_debug!(self.logger,
+                       "Force-closing channel, The error message sent to the peer : {}", error_message);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
                                let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3025,8 +3417,8 @@ where
                                        peer_state.pending_msg_events.push(
                                                events::MessageSendEvent::HandleError {
                                                        node_id: counterparty_node_id,
-                                                       action: msgs::ErrorAction::DisconnectPeer {
-                                                               msg: Some(msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() })
+                                                       action: msgs::ErrorAction::SendErrorMessage {
+                                                               msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
                                                        },
                                                }
                                        );
@@ -3037,40 +3429,211 @@ where
                }
        }
 
-       /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
-       /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-       /// channel.
-       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+       /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+       /// rejecting new HTLCs on the given channel.
+       ///
+       /// The provided `error_message` is sent to the channel's counterparty, if connected, and
+       /// should be a human-readable description of what went wrong.
+       ///
+       /// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
+       /// isn't the counterparty of the corresponding channel.
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
        -> Result<(), APIError> {
-               self.force_close_sending_error(channel_id, counterparty_node_id, true)
+               self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
        }
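A caller-side sketch of the updated signature (the `channel_manager`, `channel_id`, and `their_node_id` bindings are assumed to exist in the application; only the method call reflects the new API):

```rust
// Force-close with a human-readable reason; the message is relayed to the peer.
let error_message = "Channel exceeded our configured risk limits".to_string();
match channel_manager.force_close_broadcasting_latest_txn(
    &channel_id, &their_node_id, error_message,
) {
    Ok(()) => println!("force-close initiated for {}", channel_id),
    Err(e) => eprintln!("force-close failed: {:?}", e),
}
```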
 
        /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
-       /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
-       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+       /// the latest local transaction(s).
+       ///
+       /// The provided `error_message` is sent to the channel's counterparty, if connected, and
+       /// should be a human-readable description of what went wrong.
        ///
+       /// Fails if `channel_id` is unknown to the manager, or if the
+       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+       ///
        /// You can always broadcast the latest local transaction(s) via
        /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
-       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
        -> Result<(), APIError> {
-               self.force_close_sending_error(channel_id, counterparty_node_id, false)
+               self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
        }
 
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
        /// for each to the chain and rejecting new HTLCs on each.
-       pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+       ///
+       /// The provided `error_message` is sent to the counterparty of each closing channel, if
+       /// connected, and should be a human-readable description of what went wrong.
+       pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
                }
        }
 
        /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
        /// local transaction(s).
-       pub fn force_close_all_channels_without_broadcasting_txn(&self) {
+       ///
+       /// The provided `error_message` is sent to the counterparty of each closing channel, if
+       /// connected, and should be a human-readable description of what went wrong.
+       pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
+               }
+       }
+
+       fn can_forward_htlc_to_outgoing_channel(
+               &self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
+       ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+               if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+                       // Note that the behavior here should be identical to the above block - we
+                       // should NOT reveal the existence or non-existence of a private channel if
+                       // we don't allow forwards outbound over them.
+                       return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+               }
+               if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
+                       // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+                       // "refuse to forward unless the SCID alias was used", so we pretend
+                       // we don't have the channel here.
+                       return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+               }
+
+               // Note that we could technically not return an error yet here and just hope
+               // that the connection is reestablished or monitor updated by the time we get
+               // around to doing the actual forward, but better to fail early if we can and
+               // hopefully an attacker trying to path-trace payments cannot make this occur
+               // on a small/per-node/per-channel scale.
+               if !chan.context.is_live() { // channel_disabled
+                       // If the channel_update we're going to return is disabled (i.e. the
+                       // peer has been disabled for some time), return `channel_disabled`,
+                       // otherwise return `temporary_channel_failure`.
+                       let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+                       if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+                               return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+                       } else {
+                               return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+                       }
+               }
+               if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+                       let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+                       return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+               }
+               if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
+                       let chan_update_opt = self.get_channel_update_for_onion(next_packet.outgoing_scid, chan).ok();
+                       return Err((err, code, chan_update_opt));
+               }
+
+               Ok(())
+       }
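For reference, the failure codes composed above follow the BOLT 4 flag scheme; a small standalone illustration of how the flag bits combine with the per-failure numbers:

```rust
// BOLT 4 onion-failure flag bits, as OR'd into the codes above.
const UPDATE: u16 = 0x1000; // failure carries a channel_update
const NODE: u16 = 0x2000;   // node-level failure, no channel_update
const PERM: u16 = 0x4000;   // permanent failure

fn main() {
    assert_eq!(PERM | 10, 0x400a);   // unknown_next_peer / refused forward
    assert_eq!(UPDATE | 7, 0x1007);  // temporary_channel_failure
    assert_eq!(UPDATE | 11, 0x100b); // amount_below_minimum
    assert_eq!(UPDATE | 20, 0x1014); // channel_disabled
    assert_eq!(NODE | 2, 0x2002);    // temporary_node_failure
}
```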
+
+       /// Executes a callback `C` that returns some value `X` on the channel found with the given
+       /// `scid`. `None` is returned when the channel is not found.
+       fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
+               &self, scid: u64, callback: C,
+       ) -> Option<X> {
+               let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
+                       None => return None,
+                       Some((cp_id, id)) => (cp_id, id),
+               };
+               let per_peer_state = self.per_peer_state.read().unwrap();
+               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+               if peer_state_mutex_opt.is_none() {
+                       return None;
+               }
+               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+               let peer_state = &mut *peer_state_lock;
+               match peer_state.channel_by_id.get_mut(&channel_id).and_then(
+                       |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+               ) {
+                       None => None,
+                       Some(chan) => Some(callback(chan)),
+               }
+       }
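The lookup-then-callback shape of this helper, sketched with hypothetical stand-in types (`Manager`, `Channel`, and `with_channel` are illustrative names, not LDK API):

```rust
use std::collections::HashMap;
use std::sync::RwLock;

// Hypothetical stand-ins for the SCID map and the per-channel storage.
struct Channel { live: bool }
struct Manager {
    short_to_chan: RwLock<HashMap<u64, u32>>,
    channels: RwLock<HashMap<u32, Channel>>,
}

impl Manager {
    // Same shape as `do_funded_channel_callback`: resolve the SCID to a channel
    // id, then run the callback on the channel only if it is still present.
    fn with_channel<X, C: Fn(&mut Channel) -> X>(&self, scid: u64, callback: C) -> Option<X> {
        let id = *self.short_to_chan.read().unwrap().get(&scid)?;
        let mut chans = self.channels.write().unwrap();
        chans.get_mut(&id).map(|chan| callback(chan))
    }
}
```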
+
+       fn can_forward_htlc(
+               &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
+       ) -> Result<(), (&'static str, u16, Option<msgs::ChannelUpdate>)> {
+               match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+                       self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
+               }) {
+                       Some(Ok(())) => {},
+                       Some(Err(e)) => return Err(e),
+                       None => {
+                               // If we couldn't find the channel info for the scid, it may be a phantom or
+                               // intercept forward.
+                               if (self.default_configuration.accept_intercept_htlcs &&
+                                       fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
+                                       fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
+                               {} else {
+                                       return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+                               }
+                       }
+               }
+
+               let cur_height = self.best_block.read().unwrap().height + 1;
+               if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
+                       cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
+               ) {
+                       let chan_update_opt = self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
+                               self.get_channel_update_for_onion(next_packet_details.outgoing_scid, chan).ok()
+                       }).flatten();
+                       return Err((err_msg, err_code, chan_update_opt));
+               }
+
+               Ok(())
+       }
+
+       fn htlc_failure_from_update_add_err(
+               &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
+               mut err_code: u16, chan_update: Option<msgs::ChannelUpdate>, is_intro_node_blinded_forward: bool,
+               shared_secret: &[u8; 32]
+       ) -> HTLCFailureMsg {
+               let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+               if chan_update.is_some() && err_code & 0x1000 == 0x1000 {
+                       let chan_update = chan_update.unwrap();
+                       if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
+                               msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+                       }
+                       else if err_code == 0x1000 | 13 {
+                               msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+                       }
+                       else if err_code == 0x1000 | 20 {
+                               // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                               0u16.write(&mut res).expect("Writes cannot fail");
+                       }
+                       (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+                       msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+                       chan_update.write(&mut res).expect("Writes cannot fail");
+               } else if err_code & 0x1000 == 0x1000 {
+                       // If we're trying to return an error that requires a `channel_update` but
+                       // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+                       // generate an update), just use the generic "temporary_node_failure"
+                       // instead.
+                       err_code = 0x2000 | 2;
+               }
+
+               log_info!(
+                       WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
+                       "Failed to accept/forward incoming HTLC: {}", err_msg
+               );
+               // If `msg.blinding_point` is set, we must always fail with malformed.
+               if msg.blinding_point.is_some() {
+                       return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                               channel_id: msg.channel_id,
+                               htlc_id: msg.htlc_id,
+                               sha256_of_onion: [0; 32],
+                               failure_code: INVALID_ONION_BLINDING,
+                       });
                }
+
+               let (err_code, err_data) = if is_intro_node_blinded_forward {
+                       (INVALID_ONION_BLINDING, &[0; 32][..])
+               } else {
+                       (err_code, &res.0[..])
+               };
+               HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                       channel_id: msg.channel_id,
+                       htlc_id: msg.htlc_id,
+                       reason: HTLCFailReason::reason(err_code, err_data.to_vec())
+                               .get_encrypted_failure_packet(shared_secret, &None),
+               })
        }
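For the `channel_disabled` case (`0x1000 | 20`), the failure data written above ends up with the layout sketched below; `channel_disabled_failure_data` is a hypothetical standalone rendering of that encoding, assuming big-endian integer serialization and 258 as the `channel_update` wire type:

```rust
/// Sketch of the failure data built above for `channel_disabled`: a 2-byte
/// flags field (underspecified upstream, see the bolts#791 TODO), then the
/// length of (type || channel_update), the channel_update message type (258),
/// and finally the serialized channel_update itself.
fn channel_disabled_failure_data(chan_update_bytes: &[u8]) -> Vec<u8> {
    let mut data = Vec::with_capacity(2 + 2 + 2 + chan_update_bytes.len());
    data.extend_from_slice(&0u16.to_be_bytes());
    data.extend_from_slice(&(chan_update_bytes.len() as u16 + 2).to_be_bytes());
    data.extend_from_slice(&258u16.to_be_bytes());
    data.extend_from_slice(chan_update_bytes);
    data
}
```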
 
        fn decode_update_add_htlc_onion(
@@ -3082,48 +3645,7 @@ where
                        msg, &self.node_signer, &self.logger, &self.secp_ctx
                )?;
 
-               let is_intro_node_forward = match next_hop {
-                       onion_utils::Hop::Forward {
-                               next_hop_data: msgs::InboundOnionPayload::BlindedForward {
-                                       intro_node_blinding_point: Some(_), ..
-                               }, ..
-                       } => true,
-                       _ => false,
-               };
-
-               macro_rules! return_err {
-                       ($msg: expr, $err_code: expr, $data: expr) => {
-                               {
-                                       log_info!(
-                                               WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id)),
-                                               "Failed to accept/forward incoming HTLC: {}", $msg
-                                       );
-                                       // If `msg.blinding_point` is set, we must always fail with malformed.
-                                       if msg.blinding_point.is_some() {
-                                               return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
-                                                       channel_id: msg.channel_id,
-                                                       htlc_id: msg.htlc_id,
-                                                       sha256_of_onion: [0; 32],
-                                                       failure_code: INVALID_ONION_BLINDING,
-                                               }));
-                                       }
-
-                                       let (err_code, err_data) = if is_intro_node_forward {
-                                               (INVALID_ONION_BLINDING, &[0; 32][..])
-                                       } else { ($err_code, $data) };
-                                       return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
-                                               channel_id: msg.channel_id,
-                                               htlc_id: msg.htlc_id,
-                                               reason: HTLCFailReason::reason(err_code, err_data.to_vec())
-                                                       .get_encrypted_failure_packet(&shared_secret, &None),
-                                       }));
-                               }
-                       }
-               }
-
-               let NextPacketDetails {
-                       next_packet_pubkey, outgoing_amt_msat, outgoing_scid, outgoing_cltv_value
-               } = match next_packet_details_opt {
+               let next_packet_details = match next_packet_details_opt {
                        Some(next_packet_details) => next_packet_details,
                        // it is a receive, so no need for outbound checks
                        None => return Ok((next_hop, shared_secret, None)),
@@ -3131,124 +3653,15 @@ where
 
                // Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
                // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
-               if let Some((err, mut code, chan_update)) = loop {
-                       let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
-                       let forwarding_chan_info_opt = match id_option {
-                               None => { // unknown_next_peer
-                                       // Note that this is likely a timing oracle for detecting whether an scid is a
-                                       // phantom or an intercept.
-                                       if (self.default_configuration.accept_intercept_htlcs &&
-                                               fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
-                                               fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
-                                       {
-                                               None
-                                       } else {
-                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                                       }
-                               },
-                               Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
-                       };
-                       let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
-                               let per_peer_state = self.per_peer_state.read().unwrap();
-                               let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-                               if peer_state_mutex_opt.is_none() {
-                                       break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                               }
-                               let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
-                                       |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
-                               ).flatten() {
-                                       None => {
-                                               // Channel was removed. The short_to_chan_info and channel_by_id maps
-                                               // have no consistency guarantees.
-                                               break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
-                                       },
-                                       Some(chan) => chan
-                               };
-                               if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
-                                       // Note that the behavior here should be identical to the above block - we
-                                       // should NOT reveal the existence or non-existence of a private channel if
-                                       // we don't allow forwards outbound over them.
-                                       break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
-                               }
-                               if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
-                                       // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
-                                       // "refuse to forward unless the SCID alias was used", so we pretend
-                                       // we don't have the channel here.
-                                       break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
-                               }
-                               let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
-
-                               // Note that we could technically not return an error yet here and just hope
-                               // that the connection is reestablished or monitor updated by the time we get
-                               // around to doing the actual forward, but better to fail early if we can and
-                               // hopefully an attacker trying to path-trace payments cannot make this occur
-                               // on a small/per-node/per-channel scale.
-                               if !chan.context.is_live() { // channel_disabled
-                                       // If the channel_update we're going to return is disabled (i.e. the
-                                       // peer has been disabled for some time), return `channel_disabled`,
-                                       // otherwise return `temporary_channel_failure`.
-                                       if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
-                                               break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
-                                       } else {
-                                               break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
-                                       }
-                               }
-                               if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
-                                       break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
-                               }
-                               if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
-                                       break Some((err, code, chan_update_opt));
-                               }
-                               chan_update_opt
-                       } else {
-                               None
-                       };
-
-                       let cur_height = self.best_block.read().unwrap().height + 1;
-
-                       if let Err((err_msg, code)) = check_incoming_htlc_cltv(
-                               cur_height, outgoing_cltv_value, msg.cltv_expiry
-                       ) {
-                               if code & 0x1000 != 0 && chan_update_opt.is_none() {
-                                       // We really should set `incorrect_cltv_expiry` here but as we're not
-                                       // forwarding over a real channel we can't generate a channel_update
-                                       // for it. Instead we just return a generic temporary_node_failure.
-                                       break Some((err_msg, 0x2000 | 2, None))
-                               }
-                               let chan_update_opt = if code & 0x1000 != 0 { chan_update_opt } else { None };
-                               break Some((err_msg, code, chan_update_opt));
-                       }
+               self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
+                       let (err_msg, err_code, chan_update_opt) = e;
+                       self.htlc_failure_from_update_add_err(
+                               msg, counterparty_node_id, err_msg, err_code, chan_update_opt,
+                               next_hop.is_intro_node_blinded_forward(), &shared_secret
+                       )
+               })?;
 
-                       break None;
-               }
-               {
-                       let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
-                       if let Some(chan_update) = chan_update {
-                               if code == 0x1000 | 11 || code == 0x1000 | 12 {
-                                       msg.amount_msat.write(&mut res).expect("Writes cannot fail");
-                               }
-                               else if code == 0x1000 | 13 {
-                                       msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
-                               }
-                               else if code == 0x1000 | 20 {
-                                       // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
-                                       0u16.write(&mut res).expect("Writes cannot fail");
-                               }
-                               (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
-                               msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
-                               chan_update.write(&mut res).expect("Writes cannot fail");
-                       } else if code & 0x1000 == 0x1000 {
-                               // If we're trying to return an error that requires a `channel_update` but
-                               // we're forwarding to a phantom or intercept "channel" (i.e. cannot
-                               // generate an update), just use the generic "temporary_node_failure"
-                               // instead.
-                               code = 0x2000 | 2;
-                       }
-                       return_err!(err, code, &res.0[..]);
-               }
-               Ok((next_hop, shared_secret, Some(next_packet_pubkey)))
+               Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
        }
 
        fn construct_pending_htlc_status<'a>(
@@ -3259,7 +3672,7 @@ where
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
-                                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+                                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
                                        log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
                                        if msg.blinding_point.is_some() {
                                                return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
@@ -3328,7 +3741,7 @@ where
                if chan.context.get_short_channel_id().is_none() {
                        return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
                }
-               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
                self.get_channel_update_for_unicast(chan)
        }
@@ -3345,7 +3758,7 @@ where
        /// [`channel_update`]: msgs::ChannelUpdate
        /// [`internal_closing_signed`]: Self::internal_closing_signed
        fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
                let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
                        None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
@@ -3356,7 +3769,7 @@ where
        }
 
        fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
-               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
                let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
 
@@ -3395,8 +3808,8 @@ where
        pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
                let _lck = self.total_consistency_lock.read().unwrap();
                self.send_payment_along_path(SendAlongPathArgs {
-                       path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
-                       session_priv_bytes
+                       path, payment_hash, recipient_onion: &recipient_onion, total_value,
+                       cur_height, payment_id, keysend_preimage, session_priv_bytes
                })
        }
 
@@ -3414,7 +3827,7 @@ where
                        &self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
                        payment_hash, keysend_preimage, prng_seed
                ).map_err(|e| {
-                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
                        log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
                        e
                })?;
@@ -3422,14 +3835,14 @@ where
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
                                None => {
-                                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None);
+                                       let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
                                        log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
                                        return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
                                },
                                Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        };
 
-                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id));
+                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
                        log_trace!(logger,
                                "Attempting to send payment with payment hash {} along path with next hop {}",
                                payment_hash, path.hops.first().unwrap().short_channel_id);
@@ -3446,7 +3859,7 @@ where
                                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
                                                }
                                                let funding_txo = chan.context.get_funding_txo().unwrap();
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
                                                let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
                                                        htlc_cltv, HTLCSource::OutboundRoute {
                                                                path: path.clone(),
@@ -3583,14 +3996,43 @@ where
                self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
        }
 
-       pub(super) fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
+       /// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
+       ///
+       /// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
+       /// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
+       /// fails or if the encoded `payment_id` is not recognized. The latter may happen if the
+       /// payment is no longer tracked because it was attempted after:
+       /// - an invoice for the `payment_id` was already paid,
+       /// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
+       ///   offer, or
+       /// - the refund corresponding to the invoice has already expired.
+       ///
+       /// To retry the payment, request another invoice using a new `payment_id`.
+       ///
+       /// Attempting to pay the same invoice twice while the first payment is still pending will
+       /// result in a [`Bolt12PaymentError::DuplicateInvoice`] error.
+       ///
+       /// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] is used to indicate
+       /// whether or not the payment was successful.
+       ///
+       /// [timer tick]: Self::timer_tick_occurred
+       pub fn send_payment_for_bolt12_invoice(&self, invoice: &Bolt12Invoice) -> Result<(), Bolt12PaymentError> {
+               let secp_ctx = &self.secp_ctx;
+               let expanded_key = &self.inbound_payment_key;
+               match invoice.verify(expanded_key, secp_ctx) {
+                       Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
+                       Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
+               }
+       }
+
+       fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
                let best_block_height = self.best_block.read().unwrap().height;
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_for_bolt12_invoice(
                                invoice, payment_id, &self.router, self.list_usable_channels(),
-                               || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
-                               best_block_height, &self.logger, &self.pending_events,
+                               || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+                               &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
                                |args| self.send_payment_along_path(args)
                        )
        }
@@ -3794,7 +4236,7 @@ where
 
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
-       fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, APIError>>(
+       fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>, &Transaction) -> Result<OutPoint, &'static str>>(
                &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
                mut find_funding_output: FundingOutput,
        ) -> Result<(), APIError> {
@@ -3807,26 +4249,37 @@ where
                let funding_txo;
                let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
                        Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
-                               funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-                               let logger = WithChannelContext::from(&self.logger, &chan.context);
-                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
-                                       .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
-                                               let channel_id = chan.context.channel_id();
-                                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
-                                               let shutdown_res = chan.context.force_shutdown(false, reason);
-                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
-                                       } else { unreachable!(); });
+                               macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
+                                       let counterparty;
+                                       let err = if let ChannelError::Close((msg, reason)) = $err {
+                                               let channel_id = $chan.context.channel_id();
+                                               counterparty = $chan.context.get_counterparty_node_id();
+                                               let shutdown_res = $chan.context.force_shutdown(false, reason);
+                                               MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
+                                       } else { unreachable!(); };
+
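+                                       // handle_error!() below may itself need the per-peer and peer-state
+                                       // locks, so release ours before invoking it.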
+                                       mem::drop(peer_state_lock);
+                                       mem::drop(per_peer_state);
+                                       let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
+                                       Err($api_err)
+                               } } }
+                               match find_funding_output(&chan, &funding_transaction) {
+                                       Ok(found_funding_txo) => funding_txo = found_funding_txo,
+                                       Err(err) => {
+                                               let chan_err = ChannelError::close(err.to_owned());
+                                               let api_err = APIError::APIMisuseError { err: err.to_owned() };
+                                               return close_chan!(chan_err, api_err, chan);
+                                       },
+                               }
+
+                               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
+                               let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
                                match funding_res {
                                        Ok(funding_msg) => (chan, funding_msg),
-                                       Err((chan, err)) => {
-                                               mem::drop(peer_state_lock);
-                                               mem::drop(per_peer_state);
-                                               let _: Result<(), _> = handle_error!(self, Err(err), chan.context.get_counterparty_node_id());
-                                               return Err(APIError::ChannelUnavailable {
-                                                       err: "Signer refused to sign the initial commitment transaction".to_owned()
-                                               });
-                                       },
+                                       Err((mut chan, chan_err)) => {
+                                               let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
+                                               return close_chan!(chan_err, api_err, chan);
+                                       }
                                }
                        },
                        Some(phase) => {
@@ -3931,7 +4384,7 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let mut result = Ok(());
 
-               if !funding_transaction.is_coin_base() {
+               if !funding_transaction.is_coinbase() {
                        for inp in funding_transaction.input.iter() {
                                if inp.witness.is_empty() {
                                        result = result.and(Err(APIError::APIMisuseError {
@@ -3987,21 +4440,17 @@ where
                                is_batch_funding,
                                |chan, tx| {
                                        let mut output_index = None;
-                                       let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh();
+                                       let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
                                        for (idx, outp) in tx.output.iter().enumerate() {
-                                               if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
+                                               if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
                                                        if output_index.is_some() {
-                                                               return Err(APIError::APIMisuseError {
-                                                                       err: "Multiple outputs matched the expected script and value".to_owned()
-                                                               });
+                                                               return Err("Multiple outputs matched the expected script and value");
                                                        }
                                                        output_index = Some(idx as u16);
                                                }
                                        }
                                        if output_index.is_none() {
-                                               return Err(APIError::APIMisuseError {
-                                                       err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
-                                               });
+                                               return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
                                        }
                                        let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
                                        if let Some(funding_batch_state) = funding_batch_state.as_mut() {
@@ -4032,11 +4481,20 @@ where
                                for (channel_id, counterparty_node_id) in channels_to_remove {
                                        per_peer_state.get(&counterparty_node_id)
                                                .map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
-                                               .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
-                                               .map(|mut chan| {
+                                               .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
+                                               .map(|(mut chan, mut peer_state)| {
                                                        update_maps_on_chan_removal!(self, &chan.context());
                                                        let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
                                                        shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                               node_id: counterparty_node_id,
+                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage {
+                                                                               channel_id,
+                                                                               data: "Failed to fund channel".to_owned(),
+                                                                       }
+                                                               },
+                                                       });
                                                });
                                }
                        }
@@ -4085,6 +4543,7 @@ where
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
+
                for channel_id in channel_ids {
                        if !peer_state.has_channel(channel_id) {
                                return Err(APIError::ChannelUnavailable {
@@ -4101,7 +4560,8 @@ where
                                }
                                if let ChannelPhase::Funded(channel) = channel_phase {
                                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
                                        } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                        node_id: channel.context.get_counterparty_node_id(),
@@ -4201,7 +4661,7 @@ where
                                None => {
                                        let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
                                                next_hop_channel_id, next_node_id);
-                                       let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id));
+                                       let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
                                        log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
                                        return Err(APIError::ChannelUnavailable {
                                                err: error
@@ -4276,6 +4736,145 @@ where
                Ok(())
        }
 
+       fn process_pending_update_add_htlcs(&self) {
+               let mut decode_update_add_htlcs = new_hash_map();
+               mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
+
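+               // Choose the HTLCDestination to report in HTLCHandlingFailed events: the
+               // next-hop channel if we recognize the outgoing SCID, an unknown next hop if
+               // we don't, or a failed local receive if there was no next hop at all.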
+               let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
+                       if let Some(outgoing_scid) = outgoing_scid_opt {
+                               match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
+                                       Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
+                                               HTLCDestination::NextHopChannel {
+                                                       node_id: Some(*outgoing_counterparty_node_id),
+                                                       channel_id: *outgoing_channel_id,
+                                               },
+                                       None => HTLCDestination::UnknownNextHop {
+                                               requested_forward_scid: outgoing_scid,
+                                       },
+                               }
+                       } else {
+                               HTLCDestination::FailedPayment { payment_hash }
+                       }
+               };
+
+               'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
+                       let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+                               let counterparty_node_id = chan.context.get_counterparty_node_id();
+                               let channel_id = chan.context.channel_id();
+                               let funding_txo = chan.context.get_funding_txo().unwrap();
+                               let user_channel_id = chan.context.get_user_id();
+                               let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
+                               (counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
+                       });
+                       let (
+                               incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
+                               incoming_user_channel_id, incoming_accept_underpaying_htlcs
+                       ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
+                               incoming_channel_details
+                       } else {
+                               // The incoming channel no longer exists; these HTLCs should be resolved on-chain instead.
+                               continue;
+                       };
+
+                       let mut htlc_forwards = Vec::new();
+                       let mut htlc_fails = Vec::new();
+                       for update_add_htlc in &update_add_htlcs {
+                               let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
+                                       &update_add_htlc, &self.node_signer, &self.logger, &self.secp_ctx
+                               ) {
+                                       Ok(decoded_onion) => decoded_onion,
+                                       Err(htlc_fail) => {
+                                               htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
+                                               continue;
+                                       },
+                               };
+
+                               let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
+                               let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
+
+                               // Process the HTLC on the incoming channel.
+                               match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
+                                       chan.can_accept_incoming_htlc(
+                                               update_add_htlc, &self.fee_estimator, &logger,
+                                       )
+                               }) {
+                                       Some(Ok(_)) => {},
+                                       Some(Err((err, code))) => {
+                                               let outgoing_chan_update_opt = if let Some(outgoing_scid) = outgoing_scid_opt.as_ref() {
+                                                       self.do_funded_channel_callback(*outgoing_scid, |chan: &mut Channel<SP>| {
+                                                               self.get_channel_update_for_onion(*outgoing_scid, chan).ok()
+                                                       }).flatten()
+                                               } else {
+                                                       None
+                                               };
+                                               let htlc_fail = self.htlc_failure_from_update_add_err(
+                                                       &update_add_htlc, &incoming_counterparty_node_id, err, code,
+                                                       outgoing_chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+                                               );
+                                               let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+                                               htlc_fails.push((htlc_fail, htlc_destination));
+                                               continue;
+                                       },
+                                       // The incoming channel no longer exists; these HTLCs should be resolved on-chain instead.
+                                       None => continue 'outer_loop,
+                               }
+
+                               // Now process the HTLC on the outgoing channel if it's a forward.
+                               if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
+                                       if let Err((err, code, chan_update_opt)) = self.can_forward_htlc(
+                                               &update_add_htlc, next_packet_details
+                                       ) {
+                                               let htlc_fail = self.htlc_failure_from_update_add_err(
+                                                       &update_add_htlc, &incoming_counterparty_node_id, err, code,
+                                                       chan_update_opt, is_intro_node_blinded_forward, &shared_secret,
+                                               );
+                                               let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+                                               htlc_fails.push((htlc_fail, htlc_destination));
+                                               continue;
+                                       }
+                               }
+
+                               match self.construct_pending_htlc_status(
+                                       &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
+                                       incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
+                               ) {
+                                       PendingHTLCStatus::Forward(htlc_forward) => {
+                                               htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
+                                       },
+                                       PendingHTLCStatus::Fail(htlc_fail) => {
+                                               let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
+                                               htlc_fails.push((htlc_fail, htlc_destination));
+                                       },
+                               }
+                       }
+
+               // Process all of the forwards and failures for the channel on which the HTLCs were
+               // proposed, handling them as a single batch.
+                       let pending_forwards = (incoming_scid, incoming_funding_txo, incoming_channel_id,
+                               incoming_user_channel_id, htlc_forwards.drain(..).collect());
+                       self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
+                       for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
+                               let failure = match htlc_fail {
+                                       HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
+                                               htlc_id: fail_htlc.htlc_id,
+                                               err_packet: fail_htlc.reason,
+                                       },
+                                       HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
+                                               htlc_id: fail_malformed_htlc.htlc_id,
+                                               sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
+                                               failure_code: fail_malformed_htlc.failure_code,
+                                       },
+                               };
+                               self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_insert(vec![]).push(failure);
+                               self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
+                                       prev_channel_id: incoming_channel_id,
+                                       failed_next_destination: htlc_destination,
+                               }, None));
+                       }
+               }
+       }
+
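`process_pending_update_add_htlcs` swaps the shared map out while holding the `decode_update_add_htlcs` lock only briefly, then decodes from the local copy. A standalone sketch of that swap-and-drain pattern, with simplified types standing in for the real HTLC structures:

	use std::collections::HashMap;
	use std::sync::Mutex;

	fn drain_pending(pending: &Mutex<HashMap<u64, Vec<u8>>>) {
		// Take ownership of the current contents, leaving an empty map behind,
		// so the lock is released before any per-entry work begins.
		let mut work = HashMap::new();
		std::mem::swap(&mut work, &mut *pending.lock().unwrap());
		for (scid, htlcs) in work {
			// Decode and route each HTLC for `scid` here, without the lock held.
			let _ = (scid, htlcs);
		}
	}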
        /// Processes HTLCs which are pending waiting on random forward delay.
        ///
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
@@ -4283,6 +4882,8 @@ where
        pub fn process_pending_htlc_forwards(&self) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
+               self.process_pending_update_add_htlcs();
+
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
                let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
@@ -4294,8 +4895,8 @@ where
                                if short_chan_id != 0 {
                                        let mut forwarding_counterparty = None;
                                        macro_rules! forwarding_channel_not_found {
-                                               () => {
-                                                       for forward_info in pending_forwards.drain(..) {
+                                               ($forward_infos: expr) => {
+                                                       for forward_info in $forward_infos {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                                prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
@@ -4306,7 +4907,7 @@ where
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
                                                                                                log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
@@ -4403,7 +5004,7 @@ where
                                        let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
                                                Some((cp_id, chan_id)) => (cp_id, chan_id),
                                                None => {
-                                                       forwarding_channel_not_found!();
+                                                       forwarding_channel_not_found!(pending_forwards.drain(..));
                                                        continue;
                                                }
                                        };
@@ -4411,95 +5012,148 @@ where
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                        let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
                                        if peer_state_mutex_opt.is_none() {
-                                               forwarding_channel_not_found!();
+                                               forwarding_channel_not_found!(pending_forwards.drain(..));
                                                continue;
                                        }
                                        let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                                        let peer_state = &mut *peer_state_lock;
-                                       if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                               for forward_info in pending_forwards.drain(..) {
-                                                       let queue_fail_htlc_res = match forward_info {
-                                                               HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                       prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
-                                                                       prev_user_channel_id, forward_info: PendingHTLCInfo {
-                                                                               incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
-                                                                               routing: PendingHTLCRouting::Forward {
-                                                                                       onion_packet, blinded, ..
-                                                                               }, skimmed_fee_msat, ..
+                                       let mut draining_pending_forwards = pending_forwards.drain(..);
+                                       while let Some(forward_info) = draining_pending_forwards.next() {
+                                               let queue_fail_htlc_res = match forward_info {
+                                                       HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+                                                               prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                               prev_user_channel_id, forward_info: PendingHTLCInfo {
+                                                                       incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+                                                                       routing: PendingHTLCRouting::Forward {
+                                                                               ref onion_packet, blinded, ..
+                                                                       }, skimmed_fee_msat, ..
+                                                               },
+                                                       }) => {
+                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                       short_channel_id: prev_short_channel_id,
+                                                                       user_channel_id: Some(prev_user_channel_id),
+                                                                       channel_id: prev_channel_id,
+                                                                       outpoint: prev_funding_outpoint,
+                                                                       htlc_id: prev_htlc_id,
+                                                                       incoming_packet_shared_secret: incoming_shared_secret,
+                                                                       // Phantom payments are only PendingHTLCRouting::Receive.
+                                                                       phantom_shared_secret: None,
+                                                                       blinded_failure: blinded.map(|b| b.failure),
+                                                               });
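+                                                               // For blinded forwards, tweak the inbound blinding point with our
+                                                               // ECDH shared secret to derive the blinding point to hand the next hop.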
+                                                               let next_blinding_point = blinded.and_then(|b| {
+                                                                       let encrypted_tlvs_ss = self.node_signer.ecdh(
+                                                                               Recipient::Node, &b.inbound_blinding_point, None
+                                                                       ).unwrap().secret_bytes();
+                                                                       onion_utils::next_hop_pubkey(
+                                                                               &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+                                                                       ).ok()
+                                                               });
+
+                                                               // Forward the HTLC over the most appropriate channel with the corresponding peer,
+                                                               // applying non-strict forwarding.
+                                                               // The channel with the least amount of outbound liquidity will be used to maximize the
+                                                               // probability of being able to successfully forward a subsequent HTLC.
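+                                                               // E.g., if three usable channels with this peer can carry the HTLC and
+                                                               // report next_outbound_htlc_limit_msat of 1_000_000, 250_000 and 600_000,
+                                                               // a 200_000 msat forward is sent over the 250_000 msat channel, keeping
+                                                               // the larger channels free for later, larger HTLCs.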
+                                                               let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
+                                                                       ChannelPhase::Funded(chan) => {
+                                                                               let balances = chan.context.get_available_balances(&self.fee_estimator);
+                                                                               if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
+                                                                                       outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
+                                                                                       chan.context.is_usable() {
+                                                                                       Some((chan, balances))
+                                                                               } else {
+                                                                                       None
+                                                                               }
                                                                        },
-                                                               }) => {
-                                                                       log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
-                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                               short_channel_id: prev_short_channel_id,
-                                                                               user_channel_id: Some(prev_user_channel_id),
-                                                                               channel_id: prev_channel_id,
-                                                                               outpoint: prev_funding_outpoint,
-                                                                               htlc_id: prev_htlc_id,
-                                                                               incoming_packet_shared_secret: incoming_shared_secret,
-                                                                               // Phantom payments are only PendingHTLCRouting::Receive.
-                                                                               phantom_shared_secret: None,
-                                                                               blinded_failure: blinded.map(|b| b.failure),
-                                                                       });
-                                                                       let next_blinding_point = blinded.and_then(|b| {
-                                                                               let encrypted_tlvs_ss = self.node_signer.ecdh(
-                                                                                       Recipient::Node, &b.inbound_blinding_point, None
-                                                                               ).unwrap().secret_bytes();
-                                                                               onion_utils::next_hop_pubkey(
-                                                                                       &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
-                                                                               ).ok()
-                                                                       });
-                                                                       if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
-                                                                               payment_hash, outgoing_cltv_value, htlc_source.clone(),
-                                                                               onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
-                                                                               &&logger)
-                                                                       {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+                                                                       _ => None,
+                                                               }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+                                                               let optimal_channel = match maybe_optimal_channel {
+                                                                       Some(chan) => chan,
+                                                                       None => {
+                                                                               // Fall back to the specified channel to return an appropriate error.
+                                                                               if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                                                                       chan
                                                                                } else {
-                                                                                       panic!("Stated return value requirements in send_htlc() were not met");
+                                                                                       forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+                                                                                       break;
                                                                                }
+                                                                       }
+                                                               };
+
+                                                               let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
+                                                               let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
+                                                                       "specified"
+                                                               } else {
+                                                                       "alternate"
+                                                               };
+                                                               log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
+                                                                       prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+                                                               if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
+                                                                               payment_hash, outgoing_cltv_value, htlc_source.clone(),
+                                                                               onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+                                                                               &&logger)
+                                                               {
+                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                               log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
+                                                                       } else {
+                                                                               panic!("Stated return value requirements in send_htlc() were not met");
+                                                                       }
+
+                                                                       if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
                                                                                let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
                                                                                failed_forwards.push((htlc_source, payment_hash,
                                                                                        HTLCFailReason::reason(failure_code, data),
                                                                                        HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                ));
-                                                                               continue;
+                                                                       } else {
+                                                                               forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+                                                                               break;
                                                                        }
-                                                                       None
-                                                               },
-                                                               HTLCForwardInfo::AddHTLC { .. } => {
-                                                                       panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
-                                                               },
-                                                               HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+                                                               }
+                                                               None
+                                                       },
+                                                       HTLCForwardInfo::AddHTLC { .. } => {
+                                                               panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+                                                       },
+                                                       HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
+                                                               if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                                        log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
-                                                               },
-                                                               HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                                                                       Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
+                                                               } else {
+                                                                       forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+                                                                       break;
+                                                               }
+                                                       },
+                                                       HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+                                                               if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                                        log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
                                                                        let res = chan.queue_fail_malformed_htlc(
                                                                                htlc_id, failure_code, sha256_of_onion, &&logger
                                                                        );
                                                                        Some((res, htlc_id))
-                                                               },
-                                                       };
-                                                       if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
-                                                               if let Err(e) = queue_fail_htlc_res {
-                                                                       if let ChannelError::Ignore(msg) = e {
+                                                               } else {
+                                                                       forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+                                                                       break;
+                                                               }
+                                                       },
+                                               };
+                                               if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+                                                       if let Err(e) = queue_fail_htlc_res {
+                                                               if let ChannelError::Ignore(msg) = e {
+                                                                       if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+                                                                               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                                                log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                       } else {
-                                                                               panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
                                                                        }
-                                                                       // fail-backs are best-effort, we probably already have one
-                                                                       // pending, and if not that's OK, if not, the channel is on
-                                                                       // the chain and sending the HTLC-Timeout is their problem.
-                                                                       continue;
+                                                               } else {
+                                                                       panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
                                                                }
+                                                               // Fail-backs are best-effort; we probably already have one
+                                                               // pending, and if not that's OK. If the channel has closed,
+                                                               // it's on chain and sending the HTLC-Timeout is their problem.
+                                                               continue;
                                                        }
                                                }
-                                       } else {
-                                               forwarding_channel_not_found!();
-                                               continue;
                                        }
                                } else {
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
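When the destination channel turns out to be missing partway through draining the queue, the new `forwarding_channel_not_found!` call sites above stitch the in-hand item back onto the remaining drain with `core::iter::once(..).chain(..)`, so every queued HTLC gets failed back, not just the ones still in the iterator. A minimal standalone sketch of that iterator pattern (types are illustrative, not LDK's):

```rust
fn fail_all_back<T: std::fmt::Debug>(current: T, rest: impl Iterator<Item = T>) {
    // Re-attach the item we already pulled off, then fail everything back.
    for htlc in core::iter::once(current).chain(rest) {
        println!("failing back {:?}", htlc);
    }
}

fn main() {
    let mut pending_forwards = vec!["htlc-a", "htlc-b", "htlc-c"];
    let mut draining = pending_forwards.drain(..);
    let forward_info = draining.next().unwrap();
    // Channel lookup failed: fail the current item plus the remainder.
    fail_all_back(forward_info, draining);
}
```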
@@ -4512,25 +5166,29 @@ where
                                                                }
                                                        }) => {
                                                                let blinded_failure = routing.blinded_failure();
-                                                               let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret, mut onion_fields) = match routing {
+                                                               let (cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret, mut onion_fields) = match routing {
                                                                        PendingHTLCRouting::Receive {
-                                                                               payment_data, payment_metadata, incoming_cltv_expiry, phantom_shared_secret,
-                                                                               custom_tlvs, requires_blinded_error: _
+                                                                               payment_data, payment_metadata, payment_context,
+                                                                               incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
+                                                                               requires_blinded_error: _
                                                                        } => {
                                                                                let _legacy_hop_data = Some(payment_data.clone());
                                                                                let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
                                                                                                payment_metadata, custom_tlvs };
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
-                                                                                       Some(payment_data), phantom_shared_secret, onion_fields)
+                                                                                       Some(payment_data), payment_context, phantom_shared_secret, onion_fields)
                                                                        },
-                                                                       PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
+                                                                       PendingHTLCRouting::ReceiveKeysend {
+                                                                               payment_data, payment_preimage, payment_metadata,
+                                                                               incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
+                                                                       } => {
                                                                                let onion_fields = RecipientOnionFields {
                                                                                        payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
                                                                                        payment_metadata,
                                                                                        custom_tlvs,
                                                                                };
                                                                                (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
-                                                                                       payment_data, None, onion_fields)
+                                                                                       payment_data, None, None, onion_fields)
                                                                        },
                                                                        _ => {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
@@ -4595,10 +5253,7 @@ where
                                                                macro_rules! check_total_value {
                                                                        ($purpose: expr) => {{
                                                                                let mut payment_claimable_generated = false;
-                                                                               let is_keysend = match $purpose {
-                                                                                       events::PaymentPurpose::SpontaneousPayment(_) => true,
-                                                                                       events::PaymentPurpose::InvoicePayment { .. } => false,
-                                                                               };
+                                                                               let is_keysend = $purpose.is_keysend();
                                                                                let mut claimable_payments = self.claimable_payments.lock().unwrap();
                                                                                if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
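The open-coded `match` on the purpose is replaced above by an `is_keysend()` helper on the enum itself. A hedged sketch of what such a helper looks like (variant names simplified; not the exact LDK definition):

```rust
enum PaymentPurpose {
    InvoicePayment { payment_secret: [u8; 32] },
    SpontaneousPayment([u8; 32]),
}

impl PaymentPurpose {
    /// True for spontaneous (keysend) payments, false for invoice-based ones.
    fn is_keysend(&self) -> bool {
        matches!(self, PaymentPurpose::SpontaneousPayment(_))
    }
}

fn main() {
    let purpose = PaymentPurpose::SpontaneousPayment([0u8; 32]);
    assert!(purpose.is_keysend());
    let invoice = PaymentPurpose::InvoicePayment { payment_secret: [1u8; 32] };
    assert!(!invoice.is_keysend());
}
```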
@@ -4712,10 +5367,11 @@ where
                                                                                                                fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
-                                                                                               let purpose = events::PaymentPurpose::InvoicePayment {
-                                                                                                       payment_preimage: payment_preimage.clone(),
-                                                                                                       payment_secret: payment_data.payment_secret,
-                                                                                               };
+                                                                                               let purpose = events::PaymentPurpose::from_parts(
+                                                                                                       payment_preimage,
+                                                                                                       payment_data.payment_secret,
+                                                                                                       payment_context,
+                                                                                               );
                                                                                                check_total_value!(purpose);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
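The construction sites in the next hunks switch from building `PaymentPurpose::InvoicePayment { .. }` by hand to a `PaymentPurpose::from_parts(..)` constructor that also threads through the new BOLT 12 `payment_context`. A simplified model of how such a constructor can pick the variant (variant and field names here are illustrative only):

```rust
#[derive(Debug)]
enum PaymentContext { Bolt12Offer, Bolt12Refund }

#[derive(Debug)]
enum PaymentPurpose {
    Bolt11 { payment_preimage: Option<[u8; 32]>, payment_secret: [u8; 32] },
    Bolt12 { payment_preimage: Option<[u8; 32]>, payment_secret: [u8; 32], context: PaymentContext },
}

impl PaymentPurpose {
    fn from_parts(
        payment_preimage: Option<[u8; 32]>, payment_secret: [u8; 32],
        payment_context: Option<PaymentContext>,
    ) -> Self {
        // Centralize the variant choice instead of repeating it at each call site.
        match payment_context {
            None => PaymentPurpose::Bolt11 { payment_preimage, payment_secret },
            Some(context) => PaymentPurpose::Bolt12 { payment_preimage, payment_secret, context },
        }
    }
}

fn main() {
    let purpose = PaymentPurpose::from_parts(None, [0u8; 32], Some(PaymentContext::Bolt12Offer));
    println!("{:?}", purpose);
}
```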
@@ -4738,10 +5394,11 @@ where
                                                                                                &payment_hash, payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
-                                                                                       let purpose = events::PaymentPurpose::InvoicePayment {
-                                                                                               payment_preimage: inbound_payment.get().payment_preimage,
-                                                                                               payment_secret: payment_data.payment_secret,
-                                                                                       };
+                                                                                       let purpose = events::PaymentPurpose::from_parts(
+                                                                                               inbound_payment.get().payment_preimage,
+                                                                                               payment_data.payment_secret,
+                                                                                               payment_context,
+                                                                                       );
                                                                                        let payment_claimable_generated = check_total_value!(purpose);
                                                                                        if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
@@ -4858,7 +5515,7 @@ where
        fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
                if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
 
-               let logger = WithChannelContext::from(&self.logger, &chan.context);
+               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
@@ -4951,11 +5608,11 @@ where
                        | {
                                context.maybe_expire_prev_config();
                                if unfunded_context.should_expire_unfunded_channel() {
-                                       let logger = WithChannelContext::from(&self.logger, context);
+                                       let logger = WithChannelContext::from(&self.logger, context, None);
                                        log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
-                                       shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
+                                       shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
@@ -5007,7 +5664,8 @@ where
                                                                                if n >= DISABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
@@ -5021,7 +5679,8 @@ where
                                                                                if n >= ENABLE_GOSSIP_TICKS {
                                                                                        chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
                                                                                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                        msg: update
                                                                                                });
                                                                                        }
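`BroadcastChannelUpdate` is gossip addressed to the whole network rather than to one peer, so the two hunks above move it off the per-peer `pending_msg_events` queue and onto a node-wide `pending_broadcast_messages` list behind its own `Mutex`. A minimal model of the destination queue (message type simplified):

```rust
use std::sync::Mutex;

struct ChannelUpdate { /* serialized gossip fields elided */ }

struct Manager {
    // Node-wide queue, drained by the message handler for all peers at once.
    pending_broadcast_messages: Mutex<Vec<ChannelUpdate>>,
}

impl Manager {
    fn queue_broadcast(&self, update: ChannelUpdate) {
        let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
        pending_broadcast_messages.push(update);
    }
}

fn main() {
    let manager = Manager { pending_broadcast_messages: Mutex::new(Vec::new()) };
    manager.queue_broadcast(ChannelUpdate {});
    assert_eq!(manager.pending_broadcast_messages.lock().unwrap().len(), 1);
}
```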
@@ -5036,7 +5695,7 @@ where
                                                                chan.context.maybe_expire_prev_config();
 
                                                                if chan.should_disconnect_peer_awaiting_response() {
-                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                                        log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
                                                                                        counterparty_node_id, chan_id);
                                                                        pending_msg_events.push(MessageSendEvent::HandleError {
@@ -5060,12 +5719,12 @@ where
                                                                process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
                                                                        pending_msg_events, counterparty_node_id)
                                                        },
-                                                       #[cfg(dual_funding)]
+                                                       #[cfg(any(dual_funding, splicing))]
                                                        ChannelPhase::UnfundedInboundV2(chan) => {
                                                                process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
                                                                        pending_msg_events, counterparty_node_id)
                                                        },
-                                                       #[cfg(dual_funding)]
+                                                       #[cfg(any(dual_funding, splicing))]
                                                        ChannelPhase::UnfundedOutboundV2(chan) => {
                                                                process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
                                                                        pending_msg_events, counterparty_node_id)
@@ -5075,7 +5734,7 @@ where
 
                                        for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
                                                if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
-                                                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id));
+                                                       let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
                                                        log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
                                                        peer_state.pending_msg_events.push(
                                                                events::MessageSendEvent::HandleError {
@@ -5324,9 +5983,14 @@ where
                }
        }
 
+       fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+               let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
+               if push_forward_event { self.push_pending_forwards_ev(); }
+       }
+
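This split lets call sites that fail many HTLCs in a loop collect the "should we emit a forwards event" answers and emit a single event at the end, instead of one event per failure. The shape of the refactor, with bodies stubbed out (only the two function names come from the diff):

```rust
struct Node;

impl Node {
    /// Original entry point, now a thin wrapper.
    fn fail_htlc_backwards_internal(&self) {
        let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event();
        if push_forward_event { self.push_pending_forwards_ev(); }
    }

    /// Does the work and reports whether a forwards event is owed,
    /// leaving the actual event generation to the caller.
    fn fail_htlc_backwards_internal_without_forward_event(&self) -> bool {
        // ... queue the failure; return true iff an event must be generated.
        true
    }

    fn push_pending_forwards_ev(&self) {
        // ... enqueue the event that wakes the forwarding loop.
    }
}

fn main() {
    Node.fail_htlc_backwards_internal();
}
```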
        /// Fails an HTLC backwards to the node that sent it to us.
        /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
-       fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
+       fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
                // Ensure that no peer state channel storage lock is held when calling this function.
                // This ensures that future code doesn't introduce a lock-order requirement for
                // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
@@ -5344,19 +6008,19 @@ where
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // from block_connected which may run during initialization prior to the chain_monitor
                // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
+               let mut push_forward_event;
                match source {
                        HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
-                               if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
+                               push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
                                        session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
-                                       &self.pending_events, &self.logger)
-                               { self.push_pending_forwards_ev(); }
+                                       &self.pending_events, &self.logger);
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
                                ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
                        }) => {
                                log_trace!(
-                                       WithContext::from(&self.logger, None, Some(*channel_id)),
+                                       WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
                                        "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
                                        if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
                                );
@@ -5383,11 +6047,9 @@ where
                                        }
                                };
 
-                               let mut push_forward_ev = false;
+                               push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
-                               if forward_htlcs.is_empty() {
-                                       push_forward_ev = true;
-                               }
+                               push_forward_event &= forward_htlcs.is_empty();
                                match forward_htlcs.entry(*short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                entry.get_mut().push(failure);
@@ -5397,7 +6059,6 @@ where
                                        }
                                }
                                mem::drop(forward_htlcs);
-                               if push_forward_ev { self.push_pending_forwards_ev(); }
                                let mut pending_events = self.pending_events.lock().unwrap();
                                pending_events.push_back((events::Event::HTLCHandlingFailed {
                                        prev_channel_id: *channel_id,
@@ -5405,6 +6066,7 @@ where
                                }, None));
                        },
                }
+               push_forward_event
        }
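Note the event criterion in the body above: a forwards event is owed only if *both* pending queues (`decode_update_add_htlcs` and `forward_htlcs`) were empty before this failure was queued; if either held entries, an event was already generated. A compact model of that bookkeeping (field types simplified):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Queues {
    decode_update_add_htlcs: Mutex<HashMap<u64, Vec<u8>>>,
    forward_htlcs: Mutex<HashMap<u64, Vec<u8>>>,
}

impl Queues {
    /// Queue a failure; return whether a forwards event must be pushed.
    fn queue_failure(&self, short_channel_id: u64, failure: u8) -> bool {
        let mut push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
        let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
        push_forward_event &= forward_htlcs.is_empty();
        forward_htlcs.entry(short_channel_id).or_default().push(failure);
        push_forward_event
    }
}

fn main() {
    let queues = Queues {
        decode_update_add_htlcs: Mutex::new(HashMap::new()),
        forward_htlcs: Mutex::new(HashMap::new()),
    };
    assert!(queues.queue_failure(42, 0));  // first entry: event owed
    assert!(!queues.queue_failure(42, 1)); // queue non-empty: no new event
}
```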
 
        /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
@@ -5466,19 +6128,27 @@ where
                                        }
                                }
 
-                               let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
-                               let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
-                               let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
-                                       ClaimingPayment { amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
-                                       payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value
-                               });
-                               if dup_purpose.is_some() {
-                                       debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
-                                       log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
-                                               &payment_hash);
-                               }
+                               let claiming_payment = claimable_payments.pending_claiming_payments
+                                       .entry(payment_hash)
+                                       .and_modify(|_| {
+                                               debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
+                                               log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
+                                                       &payment_hash);
+                                       })
+                                       .or_insert_with(|| {
+                                               let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+                                               let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
+                                               ClaimingPayment {
+                                                       amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
+                                                       payment_purpose: payment.purpose,
+                                                       receiver_node_id,
+                                                       htlcs,
+                                                       sender_intended_value,
+                                                       onion_fields: payment.onion_fields,
+                                               }
+                                       });
 
-                               if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = payment.onion_fields {
+                               if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields {
                                        if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
                                                log_info!(self.logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
                                                        &payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
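The duplicate-claim check above is folded into a single `entry()` chain: `and_modify` fires only on an (unexpected) duplicate, `or_insert_with` builds the record once, and the returned reference lets the following code read `onion_fields` from whichever record ended up in the map. A stripped-down version of the pattern (payload reduced to one field):

```rust
use std::collections::HashMap;

fn record_claim(
    pending_claiming_payments: &mut HashMap<[u8; 32], u64>,
    payment_hash: [u8; 32], amount_msat: u64,
) -> u64 {
    let claiming_payment = pending_claiming_payments
        .entry(payment_hash)
        .and_modify(|_| {
            // Only reached on a duplicate, which the real code debug_asserts against.
            debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
        })
        .or_insert_with(|| amount_msat);
    // The entry API hands back a &mut to the stored record for further reads.
    *claiming_payment
}

fn main() {
    let mut pending = HashMap::new();
    assert_eq!(record_claim(&mut pending, [0u8; 32], 1_000), 1_000);
}
```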
@@ -5541,21 +6211,13 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
-                               let prev_hop_chan_id = htlc.prev_hop.channel_id;
-                               if let Err((pk, err)) = self.claim_funds_from_hop(
+                               self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
                                                debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
                                                Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
                                        }
-                               ) {
-                                       if let msgs::ErrorAction::IgnoreError = err.err.action {
-                                               // We got a temporary failure updating monitor, but will claim the
-                                               // HTLC when the monitor updating is restored (or on chain).
-                                               let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id));
-                                               log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-                                       } else { errs.push((pk, err)); }
-                               }
+                               );
                        }
                }
                if !valid_mpp {
@@ -5577,9 +6239,10 @@ where
                }
        }
 
-       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
-               prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-       -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+       fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+               &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+               completion_action: ComplFunc,
+       ) {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
                // If we haven't yet run background events assume we're still deserializing and shouldn't
@@ -5611,7 +6274,7 @@ where
                                if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
 
                                                match fulfill_res {
@@ -5641,7 +6304,7 @@ where
                                                                let action = if let Some(action) = completion_action(None, true) {
                                                                        action
                                                                } else {
-                                                                       return Ok(());
+                                                                       return;
                                                                };
                                                                mem::drop(peer_state_lock);
 
@@ -5657,7 +6320,7 @@ where
                                                                } else {
                                                                        debug_assert!(false,
                                                                                "Duplicate claims should always free another channel immediately");
-                                                                       return Ok(());
+                                                                       return;
                                                                };
                                                                if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
                                                                        let mut peer_state = peer_state_mtx.lock().unwrap();
@@ -5682,7 +6345,7 @@ where
                                                        }
                                                }
                                        }
-                                       return Ok(());
+                                       return;
                                }
                        }
                }
@@ -5704,7 +6367,7 @@ where
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must have the ability to receive the same event and try
                                // again on restart.
-                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
                                        "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
@@ -5730,7 +6393,6 @@ where
                // generally always allowed to be duplicative (and it's specifically noted in
                // `PaymentForwarded`).
                self.handle_monitor_update_completion_actions(completion_action(None, false));
-               Ok(())
        }
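With temporary-monitor-failure handling moved inside, `claim_funds_from_hop` no longer has any error worth surfacing, so its `Result<(), (PublicKey, MsgHandleErrInternal)>` return becomes `()` and the `Ok(())` exits above become plain returns; callers shed their error plumbing accordingly. A sketch of the resulting shape (only the function name comes from the diff, everything else is stubbed):

```rust
// Before: fn claim_funds_from_hop(..) -> Result<(), (PublicKey, MsgHandleErrInternal)>
// After: failures are logged/retried internally, so there is nothing to return.
fn claim_funds_from_hop(payment_preimage: [u8; 32]) {
    let monitor_updated = apply_monitor_update(payment_preimage);
    if !monitor_updated {
        // Temporary failure: the claim replays when monitor updating resumes
        // (or on chain), so we log instead of propagating an error.
        eprintln!("Temporary failure claiming HTLC, treating as success");
    }
}

fn apply_monitor_update(_preimage: [u8; 32]) -> bool { true }

fn main() {
    claim_funds_from_hop([0u8; 32]); // call sites no longer match on a Result
}
```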
 
        fn finalize_claims(&self, sources: Vec<HTLCSource>) {
@@ -5740,7 +6402,7 @@ where
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
                forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
                startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
-               next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+               next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
        ) {
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
@@ -5759,12 +6421,11 @@ where
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
                                let prev_channel_id = hop_data.channel_id;
+                               let prev_user_channel_id = hop_data.user_channel_id;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
-                               #[cfg(debug_assertions)]
-                               let claiming_channel_id = hop_data.channel_id;
-                               let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+                               self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                        if let Some(node_id) = next_channel_counterparty_node_id {
@@ -5821,7 +6482,7 @@ where
                                                                                BackgroundEvent::MonitorUpdatesComplete {
                                                                                        channel_id, ..
                                                                                } =>
-                                                                                       *channel_id == claiming_channel_id,
+                                                                                       *channel_id == prev_channel_id,
                                                                        }
                                                                }), "{:?}", *background_events);
                                                        }
@@ -5845,21 +6506,19 @@ where
                                                                "skimmed_fee_msat must always be included in total_fee_earned_msat");
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
-                                                                       total_fee_earned_msat,
-                                                                       claim_from_onchain_tx: from_onchain,
                                                                        prev_channel_id: Some(prev_channel_id),
                                                                        next_channel_id: Some(next_channel_id),
-                                                                       outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+                                                                       prev_user_channel_id,
+                                                                       next_user_channel_id,
+                                                                       total_fee_earned_msat,
                                                                        skimmed_fee_msat,
+                                                                       claim_from_onchain_tx: from_onchain,
+                                                                       outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
                                                                },
                                                                downstream_counterparty_and_funding_outpoint: chan_to_release,
                                                        })
                                                }
                                        });
-                               if let Err((pk, err)) = res {
-                                       let result: Result<(), _> = Err(err);
-                                       let _ = handle_error!(self, result, pk);
-                               }
                        },
                }
        }
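The `PaymentForwarded` event built above now also reports the `user_channel_id`s of both hops, threaded in via the new `next_user_channel_id` parameter and the `user_channel_id` stored in the previous hop's data. An illustrative layout of the enriched event (field order as in the diff, types simplified to primitives):

```rust
#[derive(Debug)]
struct PaymentForwarded {
    prev_channel_id: Option<[u8; 32]>,
    next_channel_id: Option<[u8; 32]>,
    // New: user-assigned ids (chosen at channel open) for both hops.
    prev_user_channel_id: Option<u128>,
    next_user_channel_id: Option<u128>,
    total_fee_earned_msat: Option<u64>,
    skimmed_fee_msat: Option<u64>,
    claim_from_onchain_tx: bool,
    outbound_amount_forwarded_msat: Option<u64>,
}

fn main() {
    let event = PaymentForwarded {
        prev_channel_id: Some([0u8; 32]),
        next_channel_id: Some([1u8; 32]),
        prev_user_channel_id: Some(7),
        next_user_channel_id: Some(9),
        total_fee_earned_msat: Some(1_000),
        skimmed_fee_msat: None,
        claim_from_onchain_tx: false,
        outbound_amount_forwarded_msat: Some(100_000),
    };
    println!("{:?}", event);
}
```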
@@ -5884,6 +6543,7 @@ where
                                                receiver_node_id,
                                                htlcs,
                                                sender_intended_value: sender_intended_total_msat,
+                                               onion_fields,
                                        }) = payment {
                                                self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
                                                        payment_hash,
@@ -5892,6 +6552,7 @@ where
                                                        receiver_node_id: Some(receiver_node_id),
                                                        htlcs,
                                                        sender_intended_total_msat,
+                                                       onion_fields,
                                                }, None));
                                        }
                                },
@@ -5922,24 +6583,31 @@ where
        fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
                channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
-               pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+               pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
+               funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-       -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
-               let logger = WithChannelContext::from(&self.logger, &channel.context);
-               log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+       -> (Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
+               let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+               log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
                        if raa.is_some() { "an" } else { "no" },
-                       if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+                       if commitment_update.is_some() { "a" } else { "no" },
+                       pending_forwards.len(), pending_update_adds.len(),
                        if funding_broadcastable.is_some() { "" } else { "not " },
                        if channel_ready.is_some() { "sending" } else { "without" },
                        if announcement_sigs.is_some() { "sending" } else { "without" });
 
-               let mut htlc_forwards = None;
-
                let counterparty_node_id = channel.context.get_counterparty_node_id();
+               let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
+
+               let mut htlc_forwards = None;
                if !pending_forwards.is_empty() {
-                       htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
-                               channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+                       htlc_forwards = Some((short_channel_id, channel.context.get_funding_txo().unwrap(),
+                               channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
+               }
+               let mut decode_update_add_htlcs = None;
+               if !pending_update_adds.is_empty() {
+                       decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
                }
 
                if let Some(msg) = channel_ready {
@@ -5990,7 +6658,7 @@ where
                        emit_channel_ready_event!(pending_events, channel);
                }
 
-               htlc_forwards
+               (htlc_forwards, decode_update_add_htlcs)
        }
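Resumption can now hand back two kinds of pending work: fully-decoded forwards and raw `update_add_htlc`s that still need onion decoding, hence the `(Option<..>, Option<..>)` return. A minimal model of the new shape (SCID and payload types simplified):

```rust
type HtlcForwards = (u64, Vec<String>);     // (short_channel_id, decoded forwards)
type DecodeUpdateAdds = (u64, Vec<String>); // (short_channel_id, raw update_add_htlcs)

fn handle_channel_resumption(
    short_channel_id: u64, pending_forwards: Vec<String>, pending_update_adds: Vec<String>,
) -> (Option<HtlcForwards>, Option<DecodeUpdateAdds>) {
    let htlc_forwards = if pending_forwards.is_empty() {
        None
    } else {
        Some((short_channel_id, pending_forwards))
    };
    let decode_update_add_htlcs = if pending_update_adds.is_empty() {
        None
    } else {
        Some((short_channel_id, pending_update_adds))
    };
    (htlc_forwards, decode_update_add_htlcs)
}

fn main() {
    let (forwards, update_adds) = handle_channel_resumption(42, vec![], vec!["add".into()]);
    assert!(forwards.is_none());
    assert_eq!(update_adds.unwrap().1.len(), 1);
}
```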
 
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
@@ -6030,11 +6698,11 @@ where
                                pending.retain(|upd| upd.update_id > highest_applied_update_id);
                                pending.len()
                        } else { 0 };
-               let logger = WithChannelContext::from(&self.logger, &channel.context);
+               let logger = WithChannelContext::from(&self.logger, &channel.context, None);
                log_trace!(logger, "ChannelMonitor updated to {}. Current highest is {}. {} pending in-flight updates.",
                        highest_applied_update_id, channel.context.get_latest_monitor_update_id(),
                        remaining_in_flight);
-               if !channel.is_awaiting_monitor_update() || channel.context.get_latest_monitor_update_id() != highest_applied_update_id {
+               if !channel.is_awaiting_monitor_update() || remaining_in_flight != 0 {
                        return;
                }
                handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, channel);
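The completion gate above changes from comparing update ids to simply requiring that the in-flight count has drained to zero, which also covers updates applied out of order. As a boolean predicate (a sketch, not the real signature):

```rust
fn should_complete_monitor_update(
    awaiting_monitor_update: bool, remaining_in_flight: usize,
) -> bool {
    // Only unblock the channel once it is actually waiting on us *and*
    // no in-flight monitor updates remain, regardless of update ids.
    awaiting_monitor_update && remaining_in_flight == 0
}

fn main() {
    assert!(!should_complete_monitor_update(true, 2));
    assert!(should_complete_monitor_update(true, 0));
}
```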
@@ -6084,7 +6752,7 @@ where
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
 
-               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
@@ -6105,73 +6773,82 @@ where
                // happening and return an error. N.B. that we create the channel with an outbound SCID of zero so
                // that we can delay allocating the SCID until after we're sure that the checks below will
                // succeed.
-               let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+               let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
                        Some(unaccepted_channel) => {
                                let best_block_height = self.best_block.read().unwrap().height;
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-                                       &self.logger, accept_0conf).map_err(|e| {
-                                               let err_str = e.to_string();
-                                               log_error!(logger, "{}", err_str);
-
-                                               APIError::ChannelUnavailable { err: err_str }
-                                       })
-                               }
+                                       &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
+                       },
                        _ => {
-                               let err_str = "No such channel awaiting to be accepted.".to_owned();
-                               log_error!(logger, "{}", err_str);
-
-                               Err(APIError::APIMisuseError { err: err_str })
-                       }
-               }?;
-
-               if accept_0conf {
-                       // This should have been correctly configured by the call to InboundV1Channel::new.
-                       debug_assert!(channel.context.minimum_depth().unwrap() == 0);
-               } else if channel.context.get_channel_type().requires_zero_conf() {
-                       let send_msg_err_event = events::MessageSendEvent::HandleError {
-                               node_id: channel.context.get_counterparty_node_id(),
-                               action: msgs::ErrorAction::SendErrorMessage{
-                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
-                               }
-                       };
-                       peer_state.pending_msg_events.push(send_msg_err_event);
-                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
-                       log_error!(logger, "{}", err_str);
-
-                       return Err(APIError::APIMisuseError { err: err_str });
-               } else {
-                       // If this peer already has some channels, a new channel won't increase our number of peers
-                       // with unfunded channels, so as long as we aren't over the maximum number of unfunded
-                       // channels per-peer we can accept channels from a peer with existing ones.
-                       if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
-                               let send_msg_err_event = events::MessageSendEvent::HandleError {
-                                       node_id: channel.context.get_counterparty_node_id(),
-                                       action: msgs::ErrorAction::SendErrorMessage{
-                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
-                                       }
-                               };
-                               peer_state.pending_msg_events.push(send_msg_err_event);
-                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                               let err_str = "No such channel awaiting to be accepted.".to_owned();
                                log_error!(logger, "{}", err_str);
 
                                return Err(APIError::APIMisuseError { err: err_str });
                        }
-               }
+               };
 
-               // Now that we know we have a channel, assign an outbound SCID alias.
-               let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
-               channel.context.set_outbound_scid_alias(outbound_scid_alias);
+               match res {
+                       Err(err) => {
+                               mem::drop(peer_state_lock);
+                               mem::drop(per_peer_state);
+                               match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
+                                       Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+                                       Err(e) => {
+                                               return Err(APIError::ChannelUnavailable { err: e.err });
+                                       },
+                               }
+                       }
+                       Ok(mut channel) => {
+                               if accept_0conf {
+                                       // This should have been correctly configured by the call to InboundV1Channel::new.
+                                       debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+                               } else if channel.context.get_channel_type().requires_zero_conf() {
+                                       let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                               node_id: channel.context.get_counterparty_node_id(),
+                                               action: msgs::ErrorAction::SendErrorMessage{
+                                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+                                               }
+                                       };
+                                       peer_state.pending_msg_events.push(send_msg_err_event);
+                                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+                                       log_error!(logger, "{}", err_str);
 
-               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
-                       node_id: channel.context.get_counterparty_node_id(),
-                       msg: channel.accept_inbound_channel(),
-               });
+                                       return Err(APIError::APIMisuseError { err: err_str });
+                               } else {
+                                       // If this peer already has some channels, a new channel won't increase our number of peers
+                                       // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+                                       // channels per-peer we can accept channels from a peer with existing ones.
+                                       if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+                                               let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                                       node_id: channel.context.get_counterparty_node_id(),
+                                                       action: msgs::ErrorAction::SendErrorMessage{
+                                                               msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+                                                       }
+                                               };
+                                               peer_state.pending_msg_events.push(send_msg_err_event);
+                                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                                               log_error!(logger, "{}", err_str);
 
-               peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+                                               return Err(APIError::APIMisuseError { err: err_str });
+                                       }
+                               }
 
-               Ok(())
+                               // Now that we know we have a channel, assign an outbound SCID alias.
+                               let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+                               channel.context.set_outbound_scid_alias(outbound_scid_alias);
+
+                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+                                       node_id: channel.context.get_counterparty_node_id(),
+                                       msg: channel.accept_inbound_channel(),
+                               });
+
+                               peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+
+                               Ok(())
+                       },
+               }
        }
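Note the ordering in the new `Err` arm: both guards are explicitly dropped before `handle_error!` runs, because the error path may need to take per-peer locks itself and would otherwise deadlock. The pattern in isolation (locks and error type are stand-ins):

```rust
use std::mem;
use std::sync::{Mutex, MutexGuard};

fn fail_with_handler(
    per_peer_state: MutexGuard<'_, ()>, peer_state_lock: MutexGuard<'_, ()>, err: String,
) -> Result<(), String> {
    // Release in reverse acquisition order before invoking the handler,
    // which may re-acquire these locks.
    mem::drop(peer_state_lock);
    mem::drop(per_peer_state);
    handle_error(err)
}

fn handle_error(err: String) -> Result<(), String> {
    Err(err)
}

fn main() {
    let per_peer = Mutex::new(());
    let peer = Mutex::new(());
    let res = fail_with_handler(per_peer.lock().unwrap(), peer.lock().unwrap(), "nope".into());
    assert!(res.is_err());
}
```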
 
        /// Gets the number of peers which match the given filter and do not have any funded, outbound,
@@ -6217,8 +6894,8 @@ where
                                                num_unfunded_channels += 1;
                                        }
                                },
-                               // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-                               #[cfg(dual_funding)]
+                               // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+                               #[cfg(any(dual_funding, splicing))]
                                ChannelPhase::UnfundedInboundV2(chan) => {
                                        // Only inbound V2 channels that are not 0conf and that we do not contribute to will be
                                        // included in the unfunded count.
@@ -6231,8 +6908,8 @@ where
                                        // Outbound channels don't contribute to the unfunded count in the DoS context.
                                        continue;
                                },
-                               // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-                               #[cfg(dual_funding)]
+                               // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+                               #[cfg(any(dual_funding, splicing))]
                                ChannelPhase::UnfundedOutboundV2(_) => {
                                        // Outbound channels don't contribute to the unfunded count in the DoS context.
                                        continue;
@@ -6375,7 +7052,7 @@ where
                                        match phase.get_mut() {
                                                ChannelPhase::UnfundedOutboundV1(chan) => {
                                                        try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
-                                                       (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+                                                       (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
                                                },
                                                _ => {
                                                        return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
@@ -6411,7 +7088,7 @@ where
                let (mut chan, funding_msg_opt, monitor) =
                        match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
                                Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
-                                       let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
+                                       let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
                                        match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
                                                Ok(res) => res,
                                                Err((inbound_chan, err)) => {
@@ -6428,7 +7105,7 @@ where
                                },
                                Some(mut phase) => {
                                        let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
-                                       let err = ChannelError::Close(err_msg);
+                                       let err = ChannelError::close(err_msg);
                                        return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
@@ -6443,7 +7120,7 @@ where
                        // `update_maps_on_chan_removal`), we'll remove the existing channel
                        // from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
                        // on the channel.
-                       let err = ChannelError::Close($err.to_owned());
+                       let err = ChannelError::close($err.to_owned());
                        chan.unset_funding_info(msg.temporary_channel_id);
                        return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
                } } }
@@ -6483,7 +7160,7 @@ where
                                                        }
                                                        Ok(())
                                                } else {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
                                                        fail_chan!("Duplicate funding outpoint");
                                                }
@@ -6511,7 +7188,8 @@ where
                                        let logger = WithContext::from(
                                                &self.logger,
                                                Some(chan.context.get_counterparty_node_id()),
-                                               Some(chan.context.channel_id())
+                                               Some(chan.context.channel_id()),
+                                               None
                                        );
                                        let res =
                                                chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
@@ -6527,7 +7205,7 @@ where
                                                                } else { unreachable!(); }
                                                                Ok(())
                                                        } else {
-                                                               let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
                                                                // We weren't able to watch the channel to begin with, so no
                                                                // updates should be made on it. Previously, full_stack_target
                                                                // found an (unreachable) panic when the monitor update contained
@@ -6567,7 +7245,7 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                        let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
                                                self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
                                        if let Some(announcement_sigs) = announcement_sigs_opt {
@@ -6598,7 +7276,7 @@ where
 
                                        Ok(())
                                } else {
-                                       try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
                                }
                        },
@@ -6625,7 +7303,7 @@ where
                                match phase {
                                        ChannelPhase::Funded(chan) => {
                                                if !chan.received_shutdown() {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
                                                                msg.channel_id,
                                                                if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
@@ -6653,13 +7331,13 @@ where
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
-                                               let logger = WithChannelContext::from(&self.logger, context);
+                                               let logger = WithChannelContext::from(&self.logger, context, None);
                                                log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
                                                finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
                                        // TODO(dual_funding): Combine this match arm with above.
-                                       #[cfg(dual_funding)]
+                                       #[cfg(any(dual_funding, splicing))]
                                        ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
                                                let context = phase.context_mut();
                                                log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
@@ -6713,7 +7391,7 @@ where
                                                        (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
                                                } else { (tx, None, shutdown_result) }
                                        } else {
-                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                        "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
@@ -6722,14 +7400,13 @@ where
                };
                if let Some(broadcast_tx) = tx {
                        let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
-                       log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id), "Broadcasting {}", log_tx!(broadcast_tx));
+                       log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
                        self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
                }
                if let Some(ChannelPhase::Funded(chan)) = chan_option {
                        if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                               let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                               pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                        msg: update
                                });
                        }
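
Note the queueing change here: `BroadcastChannelUpdate` is gossip addressed to no particular peer, so it now goes into the manager-wide `pending_broadcast_messages` buffer rather than one peer's `pending_msg_events`. A simplified sketch of how the two queues could be drained together when message events are collected, assuming the field layout shown in this diff:

    let mut events = Vec::new();
    // Peer-agnostic gossip broadcasts first...
    events.append(&mut *self.pending_broadcast_messages.lock().unwrap());
    // ...then messages addressed to individual peers.
    for (_node_id, peer_mutex) in self.per_peer_state.read().unwrap().iter() {
        events.append(&mut peer_mutex.lock().unwrap().pending_msg_events);
    }
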
@@ -6766,7 +7443,7 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       let pending_forward_info = match decoded_hop_res {
+                                       let mut pending_forward_info = match decoded_hop_res {
                                                Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
                                                        self.construct_pending_htlc_status(
                                                                msg, counterparty_node_id, shared_secret, next_hop,
@@ -6774,46 +7451,47 @@ where
                                                        ),
                                                Err(e) => PendingHTLCStatus::Fail(e)
                                        };
-                                       let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
+                                       // If the update_add is completely bogus, the call will Err and we will close,
+                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                       if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
                                                if msg.blinding_point.is_some() {
-                                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
-                                                                       msgs::UpdateFailMalformedHTLC {
-                                                                               channel_id: msg.channel_id,
-                                                                               htlc_id: msg.htlc_id,
-                                                                               sha256_of_onion: [0; 32],
-                                                                               failure_code: INVALID_ONION_BLINDING,
-                                                                       }
-                                                       ))
-                                               }
-                                               // If the update_add is completely bogus, the call will Err and we will close,
-                                               // but if we've sent a shutdown and they haven't acknowledged it yet, we just
-                                               // want to reject the new HTLC and fail it backwards instead of forwarding.
-                                               match pending_forward_info {
-                                                       PendingHTLCStatus::Forward(PendingHTLCInfo {
-                                                               ref incoming_shared_secret, ref routing, ..
-                                                       }) => {
-                                                               let reason = if routing.blinded_failure().is_some() {
-                                                                       HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
-                                                               } else if (error_code & 0x1000) != 0 {
-                                                                       let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-                                                                       HTLCFailReason::reason(real_code, error_data)
-                                                               } else {
-                                                                       HTLCFailReason::from_failure_code(error_code)
-                                                               }.get_encrypted_failure_packet(incoming_shared_secret, &None);
-                                                               let msg = msgs::UpdateFailHTLC {
+                                                       pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+                                                               msgs::UpdateFailMalformedHTLC {
                                                                        channel_id: msg.channel_id,
                                                                        htlc_id: msg.htlc_id,
-                                                                       reason
-                                                               };
-                                                               PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-                                                       },
-                                                       _ => pending_forward_info
+                                                                       sha256_of_onion: [0; 32],
+                                                                       failure_code: INVALID_ONION_BLINDING,
+                                                               }
+                                                       ))
+                                               } else {
+                                                       match pending_forward_info {
+                                                               PendingHTLCStatus::Forward(PendingHTLCInfo {
+                                                                       ref incoming_shared_secret, ref routing, ..
+                                                               }) => {
+                                                                       let reason = if routing.blinded_failure().is_some() {
+                                                                               HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+                                                                       } else if (error_code & 0x1000) != 0 {
+                                                                               let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+                                                                               HTLCFailReason::reason(real_code, error_data)
+                                                                       } else {
+                                                                               HTLCFailReason::from_failure_code(error_code)
+                                                                       }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+                                                                       let msg = msgs::UpdateFailHTLC {
+                                                                               channel_id: msg.channel_id,
+                                                                               htlc_id: msg.htlc_id,
+                                                                               reason
+                                                                       };
+                                                                       pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+                                                               },
+                                                               _ => {},
+                                                       }
                                                }
-                                       };
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+                                       }
+                                       try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
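
The rewrite above replaces the `create_pending_htlc_status` closure with a direct overwrite of `pending_forward_info`, but the failure policy is unchanged. In isolation (an illustrative sketch, not code from this file; the constant value matches `onion_utils::INVALID_ONION_BLINDING`):

    enum RejectedHtlcReply {
        Malformed { failure_code: u16 },     // counterparty set a blinding_point
        Relay { encrypted_reason: Vec<u8> }, // ordinary encrypted onion failure
    }

    fn reply_for_rejection(blinded: bool, encrypted_reason: Vec<u8>) -> RejectedHtlcReply {
        // BADONION | PERM | 24: blinded hops must not leak where the failure occurred.
        const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;
        if blinded {
            RejectedHtlcReply::Malformed { failure_code: INVALID_ONION_BLINDING }
        } else {
            RejectedHtlcReply::Relay { encrypted_reason }
        }
    }
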
@@ -6824,6 +7502,7 @@ where
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let funding_txo;
+               let next_user_channel_id;
                let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -6838,7 +7517,7 @@ where
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
                                                if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        log_trace!(logger,
                                                                "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
                                                                msg.channel_id);
@@ -6853,9 +7532,10 @@ where
                                                // outbound HTLC is claimed. This is guaranteed to all complete before we
                                                // process the RAA as messages are processed from single peers serially.
                                                funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+                                               next_user_channel_id = chan.context.get_user_id();
                                                res
                                        } else {
-                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                        "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
@@ -6864,7 +7544,7 @@ where
                };
                self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
                        Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
-                       funding_txo, msg.channel_id
+                       funding_txo, msg.channel_id, Some(next_user_channel_id),
                );
 
                Ok(())
@@ -6886,7 +7566,7 @@ where
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
@@ -6909,13 +7589,13 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if (msg.failure_code & 0x8000) == 0 {
-                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+                                       let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
                                        try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
                                }
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                        try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
                                }
                                Ok(())
@@ -6936,7 +7616,7 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                        let funding_txo = chan.context.get_funding_txo();
                                        let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
@@ -6945,7 +7625,7 @@ where
                                        }
                                        Ok(())
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
@@ -6953,10 +7633,28 @@ where
                }
        }
 
+       fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
+               let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
+               let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+               push_forward_event &= decode_update_add_htlcs.is_empty();
+               let scid = update_add_htlcs.0;
+               match decode_update_add_htlcs.entry(scid) {
+                       hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
+                       hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
+               }
+               if push_forward_event { self.push_pending_forwards_ev(); }
+       }
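
`push_decode_update_add_htlcs` follows the batching rule used throughout this file: a forwardable event is generated only when both HTLC queues were previously empty, so a single event covers everything queued before processing runs. Reduced to its essence (minimal sketch):

    fn should_push_forward_event(forward_htlcs_was_empty: bool, decode_queue_was_empty: bool) -> bool {
        // If either queue already had entries, an event is already pending.
        forward_htlcs_was_empty && decode_queue_was_empty
    }
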
+
        #[inline]
        fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+               let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
+               if push_forward_event { self.push_pending_forwards_ev() }
+       }
+
+       #[inline]
+       fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
+               let mut push_forward_event = false;
                for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
-                       let mut push_forward_event = false;
                        let mut new_intercept_events = VecDeque::new();
                        let mut failed_intercept_forwards = Vec::new();
                        if !pending_forwards.is_empty() {
@@ -6969,6 +7667,7 @@ where
                                        // Pull this now to avoid introducing a lock order with `forward_htlcs`.
                                        let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
 
+                                       let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
                                        let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
                                        let forward_htlcs_empty = forward_htlcs.is_empty();
                                        match forward_htlcs.entry(scid) {
@@ -6995,7 +7694,7 @@ where
                                                                                        prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
-                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
+                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
                                                                                log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
@@ -7017,9 +7716,7 @@ where
                                                        } else {
                                                                // We don't want to generate a PendingHTLCsForwardable event if only intercepted
                                                                // payments are being processed.
-                                                               if forward_htlcs_empty {
-                                                                       push_forward_event = true;
-                                                               }
+                                                               push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
                                                                entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
                                                                        prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
                                                        }
@@ -7029,15 +7726,15 @@ where
                        }
 
                        for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
-                               self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
+                               push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
                        }
 
                        if !new_intercept_events.is_empty() {
                                let mut events = self.pending_events.lock().unwrap();
                                events.append(&mut new_intercept_events);
                        }
-                       if push_forward_event { self.push_pending_forwards_ev() }
                }
+               push_forward_event
        }
 
        fn push_pending_forwards_ev(&self) {
@@ -7107,7 +7804,7 @@ where
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
@@ -7124,7 +7821,7 @@ where
                                                }
                                                htlcs_to_fail
                                        } else {
-                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                        "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
@@ -7147,10 +7844,10 @@ where
                match peer_state.channel_by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan_phase_entry) => {
                                if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                        try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
@@ -7185,7 +7882,7 @@ where
                                                update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
                                        });
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
@@ -7227,7 +7924,7 @@ where
                                        if were_node_one == msg_from_node_one {
                                                return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
-                                               let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                               let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
                                                let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
                                                // If nothing changed after applying their update, we don't need to bother
@@ -7237,7 +7934,7 @@ where
                                                }
                                        }
                                } else {
-                                       return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                       return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
@@ -7247,7 +7944,6 @@ where
        }
 
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
-               let htlc_forwards;
                let need_lnd_workaround = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -7259,7 +7955,7 @@ where
                                                msg.channel_id
                                        )
                                })?;
-                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id));
+                       let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -7290,15 +7986,17 @@ where
                                                        }
                                                }
                                                let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
-                                               htlc_forwards = self.handle_channel_resumption(
+                                               let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
                                                        &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
-                                                       Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                                       Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+                                               debug_assert!(htlc_forwards.is_none());
+                                               debug_assert!(decode_update_add_htlcs.is_none());
                                                if let Some(upd) = channel_update {
                                                        peer_state.pending_msg_events.push(upd);
                                                }
                                                need_lnd_workaround
                                        } else {
-                                               return try_chan_phase_entry!(self, Err(ChannelError::Close(
+                                               return try_chan_phase_entry!(self, Err(ChannelError::close(
                                                        "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
                                        }
                                },
@@ -7338,16 +8036,10 @@ where
                        }
                };
 
-               let mut persist = NotifyOption::SkipPersistHandleEvents;
-               if let Some(forwards) = htlc_forwards {
-                       self.forward_htlcs(&mut [forwards][..]);
-                       persist = NotifyOption::DoPersist;
-               }
-
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
-               Ok(persist)
+               Ok(NotifyOption::SkipPersistHandleEvents)
        }
 
        /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
@@ -7361,12 +8053,12 @@ where
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
-                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
+                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
                                                        self.claim_funds_internal(htlc_update.source, preimage,
                                                                htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-                                                               false, counterparty_node_id, funding_outpoint, channel_id);
+                                                               false, counterparty_node_id, funding_outpoint, channel_id, None);
                                                } else {
                                                        log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
                                                        let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
@@ -7374,7 +8066,7 @@ where
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
-                                       MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+                                       MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
@@ -7392,16 +8084,22 @@ where
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
                                                                if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                                               failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+                                                                               let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+                                                                                       reason
+                                                                               } else {
+                                                                                       ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+                                                                               };
+                                                                               failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
-                                                                                               msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+                                                                                               msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
                                                                                        },
                                                                                });
                                                                        }
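
Downstream consumers now see the richer closure reason both in the resulting shutdown and in the error message sent to the peer. The reason selection above, isolated (sketch using the enums from this diff):

    let reason = match monitor_event {
        MonitorEvent::HolderForceClosedWithInfo { reason, .. } => reason,
        // Legacy variant: assume the latest commitment transaction was broadcast.
        MonitorEvent::HolderForceClosed(_) =>
            ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
        _ => unreachable!("only force-close events reach this arm"),
    };
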
@@ -7455,7 +8153,7 @@ where
                                                let counterparty_node_id = chan.context.get_counterparty_node_id();
                                                let funding_txo = chan.context.get_funding_txo();
                                                let (monitor_opt, holding_cell_failed_htlcs) =
-                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context));
+                                                       chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
                                                if !holding_cell_failed_htlcs.is_empty() {
                                                        failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
                                                }
@@ -7562,7 +8260,7 @@ where
                                peer_state.channel_by_id.retain(|channel_id, phase| {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
                                                                Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
                                                                        if let Some(msg) = msg_opt {
@@ -7579,7 +8277,8 @@ where
                                                                                // We're done with this channel. We got a closing_signed and sent back
                                                                                // a closing_signed with a closing transaction to broadcast.
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
@@ -7644,16 +8343,15 @@ where
 
 macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
        /// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
-       /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer will
-       /// not have an expiration unless otherwise set on the builder.
+       /// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
+       /// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
        ///
        /// # Privacy
        ///
-       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
-       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
-       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
-       /// the node must be announced, otherwise, there is no way to find a path to the introduction node
-       /// in order to send the [`InvoiceRequest`].
+       /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the offer based on the given
+       /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+       /// privacy implications as well as those of the parameterized [`Router`], which implements
+       /// [`MessageRouter`].
        ///
        /// Also, uses a derived signing pubkey in the offer for recipient privacy.
        ///
@@ -7666,25 +8364,29 @@ macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
        ///
        /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
        ///
-       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
-       ///
        /// [`Offer`]: crate::offers::offer::Offer
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        pub fn create_offer_builder(
-               &$self, description: String
+               &$self, absolute_expiry: Option<Duration>
        ) -> Result<$builder, Bolt12SemanticError> {
                let node_id = $self.get_our_node_id();
                let expanded_key = &$self.inbound_payment_key;
                let entropy = &*$self.entropy_source;
                let secp_ctx = &$self.secp_ctx;
 
-               let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+               let path = $self.create_blinded_path_using_absolute_expiry(absolute_expiry)
+                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = OfferBuilder::deriving_signing_pubkey(
-                       description, node_id, expanded_key, entropy, secp_ctx
+                       node_id, expanded_key, entropy, secp_ctx
                )
                        .chain_hash($self.chain_hash)
                        .path(path);
 
+               let builder = match absolute_expiry {
+                       None => builder,
+                       Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
+               };
+
                Ok(builder.into())
        }
 } }
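
A hedged usage sketch of the new signature (values and builder methods are illustrative of the API after `description` moved off the constructor; adjust to the setters actually available on the returned builder):

    use core::time::Duration;

    // `absolute_expiry` is a Duration since the Unix epoch; None means no expiry.
    let expiry = Some(Duration::from_secs(1_700_000_000 + 7 * 24 * 3600)); // hypothetical
    let offer = channel_manager
        .create_offer_builder(expiry)?
        .amount_msats(100_000)
        .description("coffee".to_string())
        .build()?;
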
@@ -7712,11 +8414,10 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
        ///
        /// # Privacy
        ///
-       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
-       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
-       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
-       /// the node must be announced, otherwise, there is no way to find a path to the introduction in
-       /// order to send the [`Bolt12Invoice`].
+       /// Uses [`MessageRouter`] to construct a [`BlindedPath`] for the refund based on the given
+       /// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
+       /// privacy implications as well as those of the parameterized [`Router`], which implements
+       /// [`MessageRouter`].
        ///
        /// Also, uses a derived payer id in the refund for payer privacy.
        ///
@@ -7732,29 +8433,30 @@ macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
        /// - `amount_msats` is invalid, or
        /// - the parameterized [`Router`] is unable to create a blinded path for the refund.
        ///
-       /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
-       ///
        /// [`Refund`]: crate::offers::refund::Refund
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
        /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
        pub fn create_refund_builder(
-               &$self, description: String, amount_msats: u64, absolute_expiry: Duration,
-               payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
+               &$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
+               retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
        ) -> Result<$builder, Bolt12SemanticError> {
                let node_id = $self.get_our_node_id();
                let expanded_key = &$self.inbound_payment_key;
                let entropy = &*$self.entropy_source;
                let secp_ctx = &$self.secp_ctx;
 
-               let path = $self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+               let path = $self.create_blinded_path_using_absolute_expiry(Some(absolute_expiry))
+                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = RefundBuilder::deriving_payer_id(
-                       description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
+                       node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
                )?
                        .chain_hash($self.chain_hash)
                        .absolute_expiry(absolute_expiry)
                        .path(path);
 
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
+
                let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
                $self.pending_outbound_payments
                        .add_new_awaiting_invoice(
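
And for the refund side, a corresponding hedged sketch (all values illustrative; note `absolute_expiry` remains required here and also seeds the `StaleExpiration` used to abandon the payment):

    use core::time::Duration;

    let refund = channel_manager
        .create_refund_builder(
            10_000_000,                         // amount_msats
            Duration::from_secs(1_700_003_600), // absolute_expiry since Unix epoch
            PaymentId([42; 32]),
            Retry::Attempts(3),
            None,                               // max_total_routing_fee_msat
        )?
        .build()?;
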
@@ -7814,10 +8516,9 @@ where
        ///
        /// # Privacy
        ///
-       /// Uses a one-hop [`BlindedPath`] for the reply path with [`ChannelManager::get_our_node_id`]
-       /// as the introduction node and a derived payer id for payer privacy. As such, currently, the
-       /// node must be announced. Otherwise, there is no way to find a path to the introduction node
-       /// in order to send the [`Bolt12Invoice`].
+       /// For payer privacy, uses a derived payer id and [`MessageRouter::create_blinded_paths`]
+       /// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see
+       /// the docs of the parameterized [`Router`], which implements [`MessageRouter`].
        ///
        /// # Limitations
        ///
@@ -7870,6 +8571,8 @@ where
                let invoice_request = builder.build_and_sign()?;
                let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
                let expiration = StaleExpiration::TimerTicks(1);
                self.pending_outbound_payments
                        .add_new_awaiting_invoice(
@@ -7878,14 +8581,7 @@ where
                        .map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
 
                let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
-               if offer.paths().is_empty() {
-                       let message = new_pending_onion_message(
-                               OffersMessage::InvoiceRequest(invoice_request),
-                               Destination::Node(offer.signing_pubkey()),
-                               Some(reply_path),
-                       );
-                       pending_offers_messages.push(message);
-               } else {
+               if !offer.paths().is_empty() {
                        // Send as many invoice requests as there are paths in the offer (with an upper bound).
                        // Using only one path could result in a failure if the path no longer exists. But only
                        // one invoice for a given payment id will be paid, even if more than one is received.
@@ -7898,6 +8594,16 @@ where
                                );
                                pending_offers_messages.push(message);
                        }
+               } else if let Some(signing_pubkey) = offer.signing_pubkey() {
+                       let message = new_pending_onion_message(
+                               OffersMessage::InvoiceRequest(invoice_request),
+                               Destination::Node(signing_pubkey),
+                               Some(reply_path),
+                       );
+                       pending_offers_messages.push(message);
+               } else {
+                       debug_assert!(false);
+                       return Err(Bolt12SemanticError::MissingSigningPubkey);
                }
 
                Ok(())
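Editor's note: the hunk above reorders the fallback logic. Invoice requests prefer the offer's blinded paths, fall back to targeting the signing pubkey directly only when no paths exist, and reject an offer with neither. A toy restatement of that selection, with hypothetical types rather than LDK's:

```rust
// Hypothetical types standing in for LDK's Destination / blinded paths.
#[derive(Debug)]
enum Destination {
    Node([u8; 33]),
    BlindedPath(usize), // an index stands in for a real path
}

fn select_destinations(
    paths: &[usize], signing_pubkey: Option<[u8; 33]>, limit: usize,
) -> Result<Vec<Destination>, &'static str> {
    if !paths.is_empty() {
        // Send along as many of the offer's paths as the cap allows; only one
        // invoice per payment id will be paid even if several are received.
        Ok(paths.iter().take(limit).map(|p| Destination::BlindedPath(*p)).collect())
    } else if let Some(pubkey) = signing_pubkey {
        Ok(vec![Destination::Node(pubkey)])
    } else {
        Err("offer has neither paths nor a signing pubkey")
    }
}

fn main() {
    assert!(select_destinations(&[], None, 3).is_err());
    assert_eq!(select_destinations(&[1, 2], None, 3).unwrap().len(), 2);
}
```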
@@ -7908,7 +8614,7 @@ where
        ///
        /// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
        /// [`BlindedPath`] containing the [`PaymentSecret`] needed to reconstruct the corresponding
-       /// [`PaymentPreimage`].
+       /// [`PaymentPreimage`]. It is returned purely for informational purposes.
        ///
        /// # Limitations
        ///
@@ -7925,7 +8631,9 @@ where
        ///   the invoice.
        ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-       pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
+       pub fn request_refund_payment(
+               &self, refund: &Refund
+       ) -> Result<Bolt12Invoice, Bolt12SemanticError> {
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
@@ -7937,9 +8645,14 @@ where
                        return Err(Bolt12SemanticError::UnsupportedChain);
                }
 
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
                match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
                        Ok((payment_hash, payment_secret)) => {
-                               let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+                               let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
+                               let payment_paths = self.create_blinded_payment_paths(
+                                       amount_msats, payment_secret, payment_context
+                               )
                                        .map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
                                #[cfg(feature = "std")]
@@ -7962,7 +8675,7 @@ where
                                let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
                                if refund.paths().is_empty() {
                                        let message = new_pending_onion_message(
-                                               OffersMessage::Invoice(invoice),
+                                               OffersMessage::Invoice(invoice.clone()),
                                                Destination::Node(refund.payer_id()),
                                                Some(reply_path),
                                        );
@@ -7978,7 +8691,7 @@ where
                                        }
                                }
 
-                               Ok(())
+                               Ok(invoice)
                        },
                        Err(()) => Err(Bolt12SemanticError::InvalidAmount),
                }
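Editor's note: since the outgoing onion message is built from a clone, the invoice itself can now be handed back to the caller. A trivial sketch of the pattern with placeholder types; only the shape of the change is shown.

```rust
// Placeholder type; not LDK's Bolt12Invoice.
#[derive(Clone, Debug, PartialEq)]
struct Invoice(u64);

fn request_payment(send_queue: &mut Vec<Invoice>) -> Result<Invoice, ()> {
    let invoice = Invoice(42);
    send_queue.push(invoice.clone()); // the clone goes out as the message
    Ok(invoice)                       // the original is returned for inspection
}

fn main() {
    let mut send_queue = Vec::new();
    let invoice = request_payment(&mut send_queue).unwrap();
    assert_eq!(send_queue[0], invoice);
}
```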
@@ -7990,10 +8703,9 @@ where
        /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
        /// [`PaymentHash`] and [`PaymentPreimage`] for you.
        ///
-       /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
-       /// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
-       /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
-       /// passed directly to [`claim_funds`].
+       /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`]
+       /// event, whose [`PaymentClaimable::purpose`] will have [`PaymentPurpose::preimage`] return
+       /// `Some`. That preimage should then be passed directly to [`claim_funds`].
        ///
        /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
        ///
@@ -8013,8 +8725,7 @@ where
        /// [`claim_funds`]: Self::claim_funds
        /// [`PaymentClaimable`]: events::Event::PaymentClaimable
        /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
-       /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
-       /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
+       /// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
                min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
@@ -8084,6 +8795,38 @@ where
                inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
        }
 
+       /// Creates a blinded path by delegating to [`MessageRouter`] based on the path's intended
+       /// lifetime.
+       ///
+       /// Short-lived paths, as determined by the given `absolute_expiry` in seconds since the
+       /// Unix epoch, use a compact representation, while long-lived paths do not. See
+       /// [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`] for the cutoff.
+       fn create_blinded_path_using_absolute_expiry(
+               &self, absolute_expiry: Option<Duration>
+       ) -> Result<BlindedPath, ()> {
+               let now = self.duration_since_epoch();
+               let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
+
+               if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
+                       self.create_compact_blinded_path()
+               } else {
+                       self.create_blinded_path()
+               }
+       }
+
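Editor's note: a runnable sketch of the short-lived check above, using only `std::time` and a stand-in constant (the real `MAX_SHORT_LIVED_RELATIVE_EXPIRY` is defined elsewhere in the crate and may differ).

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Stand-in value for illustration; the crate defines its own constant.
const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);

fn is_short_lived(absolute_expiry: Option<Duration>) -> bool {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    // `None` means "never expires"; `Duration::MAX` pushes it to the
    // long-lived (non-compact) branch.
    absolute_expiry.unwrap_or(Duration::MAX)
        <= now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY)
}

fn main() {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    assert!(is_short_lived(Some(now + Duration::from_secs(600))));
    assert!(!is_short_lived(None));
}
```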
+       pub(super) fn duration_since_epoch(&self) -> Duration {
+               #[cfg(not(feature = "std"))]
+               let now = Duration::from_secs(
+                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+               );
+               #[cfg(feature = "std")]
+               let now = std::time::SystemTime::now()
+                       .duration_since(std::time::SystemTime::UNIX_EPOCH)
+                       .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
+
+               now
+       }
+
        /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
        ///
        /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
@@ -8093,7 +8836,9 @@ where
 
                let peers = self.per_peer_state.read().unwrap()
                        .iter()
-                       .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+                       .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+                       .filter(|(_, peer)| peer.is_connected)
+                       .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
                        .map(|(node_id, _)| *node_id)
                        .collect::<Vec<_>>();
 
@@ -8102,10 +8847,37 @@ where
                        .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
 
+       /// Creates a blinded path by delegating to [`MessageRouter::create_compact_blinded_paths`].
+       ///
+       /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+       fn create_compact_blinded_path(&self) -> Result<BlindedPath, ()> {
+               let recipient = self.get_our_node_id();
+               let secp_ctx = &self.secp_ctx;
+
+               let peers = self.per_peer_state.read().unwrap()
+                       .iter()
+                       .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
+                       .filter(|(_, peer)| peer.is_connected)
+                       .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
+                       .map(|(node_id, peer)| ForwardNode {
+                               node_id: *node_id,
+                               short_channel_id: peer.channel_by_id
+                                       .iter()
+                                       .filter(|(_, channel)| channel.context().is_usable())
+                                       .min_by_key(|(_, channel)| channel.context().channel_creation_height)
+                                       .and_then(|(_, channel)| channel.context().get_short_channel_id()),
+                       })
+                       .collect::<Vec<_>>();
+
+               self.router
+                       .create_compact_blinded_paths(recipient, peers, secp_ctx)
+                       .and_then(|paths| paths.into_iter().next().ok_or(()))
+       }
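Editor's note: for each connected peer, the compact path uses the short channel id of that peer's oldest usable channel, which is the least likely to be closed or reorged out while the path is in use. A toy version of that selection, with a hypothetical `Chan` struct rather than LDK's channel type:

```rust
// Hypothetical struct, not LDK's channel type.
struct Chan { usable: bool, creation_height: u32, scid: Option<u64> }

fn stable_scid(channels: &[Chan]) -> Option<u64> {
    channels.iter()
        .filter(|c| c.usable)
        // The oldest (lowest creation height) usable channel has the most
        // confirmations, so its scid is the most stable choice.
        .min_by_key(|c| c.creation_height)
        .and_then(|c| c.scid)
}

fn main() {
    let chans = [
        Chan { usable: true, creation_height: 800_100, scid: Some(42) },
        Chan { usable: true, creation_height: 799_000, scid: Some(7) },
        Chan { usable: false, creation_height: 700_000, scid: Some(1) },
    ];
    assert_eq!(stable_scid(&chans), Some(7));
}
```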
+
        /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
        /// [`Router::create_blinded_payment_paths`].
        fn create_blinded_payment_paths(
-               &self, amount_msats: u64, payment_secret: PaymentSecret
+               &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
        ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
                let secp_ctx = &self.secp_ctx;
 
@@ -8119,6 +8891,7 @@ where
                                max_cltv_expiry,
                                htlc_minimum_msat: 1,
                        },
+                       payment_context,
                };
                self.router.create_blinded_payment_paths(
                        payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
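Editor's note: threading a `PaymentContext` through the recipient's `ReceiveTlvs` means the origin of a claimed payment (offer vs. refund) comes back inside the blinded path itself, with no extra lookup at claim time. A toy restatement with illustrative stand-in types, not LDK's:

```rust
// Illustrative stand-ins, not LDK's actual types.
#[derive(Clone, Debug, PartialEq)]
enum PaymentContext {
    Bolt12Offer { offer_id: u64 },
    Bolt12Refund,
}

struct ReceiveTlvs { payment_secret: [u8; 32], payment_context: PaymentContext }

fn main() {
    // The recipient seals the context into the path; it comes back on claim.
    let refund = ReceiveTlvs {
        payment_secret: [0; 32],
        payment_context: PaymentContext::Bolt12Refund,
    };
    let offer = ReceiveTlvs {
        payment_secret: [1; 32],
        payment_context: PaymentContext::Bolt12Offer { offer_id: 7 },
    };
    assert_ne!(refund.payment_context, offer.payment_context);
    let _ = (refund.payment_secret, offer.payment_secret);
}
```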
@@ -8232,7 +9005,7 @@ where
                mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 
                let logger = WithContext::from(
-                       &self.logger, Some(counterparty_node_id), Some(channel_id),
+                       &self.logger, Some(counterparty_node_id), Some(channel_id), None
                );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8333,7 +9106,7 @@ where
        /// will randomly be placed first or last in the returned array.
        ///
        /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
-       /// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be pleaced among
+       /// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
        /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
@@ -8353,6 +9126,7 @@ where
                                result = NotifyOption::DoPersist;
                        }
 
+                       let mut is_any_peer_connected = false;
                        let mut pending_events = Vec::new();
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
@@ -8361,6 +9135,15 @@ where
                                if peer_state.pending_msg_events.len() > 0 {
                                        pending_events.append(&mut peer_state.pending_msg_events);
                                }
+                               if peer_state.is_connected {
+                                       is_any_peer_connected = true;
+                               }
+                       }
+
+                       // Only drain the queued broadcast messages once at least one peer is
+                       // connected, as otherwise they would be consumed with no peer to send
+                       // them to.
+                       if is_any_peer_connected {
+                               let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
+                               pending_events.append(&mut broadcast_msgs);
                        }
 
                        if !pending_events.is_empty() {
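Editor's note: a standalone sketch of the gating above. Broadcast messages stay queued until at least one peer is connected, so they are never drained into the void. Plain types, not LDK's event machinery:

```rust
fn drain_broadcasts(
    any_peer_connected: bool, queue: &mut Vec<&'static str>, out: &mut Vec<&'static str>,
) {
    // Only move queued broadcasts out when someone can actually receive them.
    if any_peer_connected {
        out.append(queue);
    }
}

fn main() {
    let mut queue = vec!["channel_update"];
    let mut out = Vec::new();
    drain_broadcasts(false, &mut queue, &mut out);
    assert_eq!(queue.len(), 1); // kept queued while disconnected
    drain_broadcasts(true, &mut queue, &mut out);
    assert_eq!(out, ["channel_update"]);
    assert!(queue.is_empty());
}
```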
@@ -8432,7 +9215,7 @@ where
                        *best_block = BestBlock::new(header.prev_blockhash, new_height)
                }
 
-               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
        }
 }
 
@@ -8458,13 +9241,13 @@ where
                let _persistence_guard =
                        PersistenceNotifierGuard::optionally_notify_skipping_background_events(
                                self, || -> NotifyOption { NotifyOption::DoPersist });
-               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context))
+               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
                        .map(|(a, b)| (a, Vec::new(), b)));
 
                let last_best_block_height = self.best_block.read().unwrap().height;
                if height < last_best_block_height {
                        let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
-                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+                       self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
                }
        }
 
@@ -8481,7 +9264,38 @@ where
                                self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
-               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context)));
+               let mut min_anchor_feerate = None;
+               let mut min_non_anchor_feerate = None;
+               if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
+                       // If we're past the startup phase, update our feerate cache
+                       let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
+                       if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+                               last_days_feerates.pop_front();
+                       }
+                       let anchor_feerate = self.fee_estimator
+                               .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
+                       let non_anchor_feerate = self.fee_estimator
+                               .bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
+                       last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
+                       if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
+                               min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
+                               min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
+                       }
+               }
+
+               self.do_chain_event(Some(height), |channel| {
+                       let logger = WithChannelContext::from(&self.logger, &channel.context, None);
+                       if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+                               if let Some(feerate) = min_anchor_feerate {
+                                       channel.check_for_stale_feerate(&logger, feerate)?;
+                               }
+                       } else {
+                               if let Some(feerate) = min_non_anchor_feerate {
+                                       channel.check_for_stale_feerate(&logger, feerate)?;
+                               }
+                       }
+                       channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
+               });
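Editor's note: the feerate cache keeps roughly a day of per-block samples and only acts on the minimum once the window is full, so a brief fee spike cannot mark a channel's feerate as stale. A self-contained sketch of that sliding-window minimum; `FEERATE_TRACKING_BLOCKS` here is a stand-in value.

```rust
use std::collections::VecDeque;

// Stand-in value; LDK defines its own constant.
const FEERATE_TRACKING_BLOCKS: usize = 144;

struct FeerateTracker { samples: VecDeque<u32> }

impl FeerateTracker {
    fn new() -> Self { Self { samples: VecDeque::new() } }

    /// Records a block's feerate and returns the windowed minimum once the
    /// window is full, mirroring `min_anchor_feerate`/`min_non_anchor_feerate`.
    fn on_block(&mut self, feerate_sat_per_kw: u32) -> Option<u32> {
        if self.samples.len() >= FEERATE_TRACKING_BLOCKS {
            self.samples.pop_front();
        }
        self.samples.push_back(feerate_sat_per_kw);
        if self.samples.len() >= FEERATE_TRACKING_BLOCKS {
            self.samples.iter().min().copied()
        } else {
            None
        }
    }
}

fn main() {
    let mut tracker = FeerateTracker::new();
    let mut min = None;
    for height in 0..FEERATE_TRACKING_BLOCKS as u32 {
        min = tracker.on_block(1000 + height % 7);
    }
    assert_eq!(min, Some(1000));
}
```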
 
                macro_rules! max_time {
                        ($timestamp: expr) => {
@@ -8530,7 +9344,7 @@ where
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
-                                       channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context)).map(|()| (None, Vec::new(), None))
+                                       channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
                                } else { Ok((None, Vec::new(), None)) }
                        } else { Ok((None, Vec::new(), None)) }
                });
@@ -8565,12 +9379,13 @@ where
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
+
                                peer_state.channel_by_id.retain(|_, phase| {
                                        match phase {
                                                // Retain unfunded channels.
                                                ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
                                                // TODO(dual_funding): Combine this match arm with above.
-                                               #[cfg(dual_funding)]
+                                               #[cfg(any(dual_funding, splicing))]
                                                ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
                                                ChannelPhase::Funded(channel) => {
                                                        let res = f(channel);
@@ -8580,7 +9395,7 @@ where
                                                                        timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
                                                                                HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
                                                                }
-                                                               let logger = WithChannelContext::from(&self.logger, &channel.context);
+                                                               let logger = WithChannelContext::from(&self.logger, &channel.context, None);
                                                                if let Some(channel_ready) = channel_ready_opt {
                                                                        send_channel_ready!(self, pending_msg_events, channel, channel_ready);
                                                                        if channel.context.is_usable() {
@@ -8640,7 +9455,8 @@ where
                                                                let reason_message = format!("{}", reason);
                                                                failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+                                                                       pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
@@ -8704,7 +9520,7 @@ where
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
                                        let logger = WithContext::from(
-                                               &self.logger, None, Some(htlc.prev_channel_id)
+                                               &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
                                        );
                                        log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
@@ -8732,6 +9548,9 @@ where
        }
 
        /// Returns true if this [`ChannelManager`] needs to be persisted.
+       ///
+       /// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
+       /// indicates this should be checked.
        pub fn get_and_clear_needs_persistence(&self) -> bool {
                self.needs_persist_flag.swap(false, Ordering::AcqRel)
        }
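Editor's note: a hedged sketch of the polling pattern the new doc line describes: wake on the future, then check-and-clear the flag, which is atomic so a racing update simply re-signals the future. The trait below is a stand-in for the relevant `ChannelManager` methods, not LDK's API.

```rust
use std::cell::Cell;

// Stand-in for the ChannelManager methods referenced above.
trait Persistable {
    fn get_and_clear_needs_persistence(&self) -> bool;
    fn persist(&self);
}

// One iteration of a persister loop; in a real one this runs after
// `get_event_or_persistence_needed_future` resolves.
fn run_once<P: Persistable>(manager: &P) {
    // The check clears the flag atomically, so a concurrent update after this
    // call simply re-signals the future for the next iteration.
    if manager.get_and_clear_needs_persistence() {
        manager.persist();
    }
}

struct Dummy { dirty: Cell<bool>, persisted: Cell<u32> }

impl Persistable for Dummy {
    fn get_and_clear_needs_persistence(&self) -> bool { self.dirty.replace(false) }
    fn persist(&self) { self.persisted.set(self.persisted.get() + 1); }
}

fn main() {
    let manager = Dummy { dirty: Cell::new(true), persisted: Cell::new(0) };
    run_once(&manager);
    run_once(&manager); // flag already cleared; nothing persisted this time
    assert_eq!(manager.persisted.get(), 1);
}
```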
@@ -8872,18 +9691,21 @@ where
                         msg.channel_id.clone())), *counterparty_node_id);
        }
 
+       #[cfg(splicing)]
        fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported".to_owned(),
                         msg.channel_id.clone())), *counterparty_node_id);
        }
 
+       #[cfg(splicing)]
        fn handle_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported (splice_ack)".to_owned(),
                         msg.channel_id.clone())), *counterparty_node_id);
        }
 
+       #[cfg(splicing)]
        fn handle_splice_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Splicing not supported (splice_locked)".to_owned(),
@@ -9014,7 +9836,7 @@ where
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
                        log_debug!(
-                               WithContext::from(&self.logger, Some(*counterparty_node_id), None),
+                               WithContext::from(&self.logger, Some(*counterparty_node_id), None, None),
                                "Marking channels with {} disconnected and generating channel_updates.",
                                log_pubkey!(counterparty_node_id)
                        );
@@ -9025,27 +9847,30 @@ where
                                peer_state.channel_by_id.retain(|_, phase| {
                                        let context = match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
                                                                // We only retain funded channels that are not shutdown.
                                                                return true;
                                                        }
                                                        &mut chan.context
                                                },
-                                               // We retain UnfundedOutboundV1 channel for some time in case
-                                               // peer unexpectedly disconnects, and intends to reconnect again.
-                                               ChannelPhase::UnfundedOutboundV1(_) => {
-                                                       return true;
-                                               },
+                                               // If we get disconnected and haven't yet committed to a funding
+                                               // transaction, we can replay the `open_channel` on reconnection, so don't
+                                               // bother dropping the channel here. However, if we already committed to
+                                               // the funding transaction we don't yet support replaying the funding
+                                               // handshake (and bailing if the peer rejects it), so we force-close in
+                                               // that case.
+                                               ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
+                                               ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
                                                // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
-                                               #[cfg(dual_funding)]
+                                               #[cfg(any(dual_funding, splicing))]
                                                ChannelPhase::UnfundedOutboundV2(chan) => {
                                                        &mut chan.context
                                                },
-                                               #[cfg(dual_funding)]
+                                               #[cfg(any(dual_funding, splicing))]
                                                ChannelPhase::UnfundedInboundV2(chan) => {
                                                        &mut chan.context
                                                },
@@ -9097,7 +9922,12 @@ where
                                                // Gossip
                                                &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
                                                &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
-                                               &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+                                               // [`ChannelManager::pending_broadcast_messages`] holds the
+                                               // [`BroadcastChannelUpdate`] events; this arm exists only to keep the
+                                               // match exhaustive.
+                                               &events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
+                                                       debug_assert!(false, "BroadcastChannelUpdate should be in pending_broadcast_messages");
+                                                       false
+                                               },
                                                &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                                &events::MessageSendEvent::SendChannelUpdate { .. } => false,
                                                &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -9122,7 +9952,7 @@ where
        }
 
        fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
-               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);
                if !init_msg.features.supports_static_remote_key() {
                        log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
                        return Err(());
@@ -9187,7 +10017,7 @@ where
                                for (_, phase) in peer_state.channel_by_id.iter_mut() {
                                        match phase {
                                                ChannelPhase::Funded(chan) => {
-                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context, None);
                                                        pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
                                                                node_id: chan.context.get_counterparty_node_id(),
                                                                msg: chan.get_channel_reestablish(&&logger),
@@ -9201,8 +10031,8 @@ where
                                                        });
                                                }
 
-                                               // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-                                               #[cfg(dual_funding)]
+                                               // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+                                               #[cfg(any(dual_funding, splicing))]
                                                ChannelPhase::UnfundedOutboundV2(chan) => {
                                                        pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
                                                                node_id: chan.context.get_counterparty_node_id(),
@@ -9217,8 +10047,8 @@ where
                                                        debug_assert!(false);
                                                }
 
-                                               // TODO(dual_funding): Combine this match arm with above once #[cfg(dual_funding)] is removed.
-                                               #[cfg(dual_funding)]
+                                               // TODO(dual_funding): Combine this match arm with above once #[cfg(any(dual_funding, splicing))] is removed.
+                                               #[cfg(any(dual_funding, splicing))]
                                                ChannelPhase::UnfundedInboundV2(channel) => {
                                                        // Since unfunded inbound channel maps are cleared upon disconnecting a peer,
                                                        // they are not persisted and won't be recovered after a crash.
@@ -9236,8 +10066,6 @@ where
        }
 
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
                match &msg.data as &str {
                        "cannot co-op close channel w/ active htlcs"|
                        "link failed to shutdown" =>
@@ -9250,34 +10078,45 @@ where
                                // We're not going to bother handling this in a sensible way, instead simply
                                // repeating the Shutdown message on repeat until morale improves.
                                if !msg.channel_id.is_zero() {
-                                       let per_peer_state = self.per_peer_state.read().unwrap();
-                                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
-                                       if peer_state_mutex_opt.is_none() { return; }
-                                       let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
-                                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
-                                               if let Some(msg) = chan.get_outbound_shutdown() {
-                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                               node_id: *counterparty_node_id,
-                                                               msg,
-                                                       });
-                                               }
-                                               peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
-                                                       node_id: *counterparty_node_id,
-                                                       action: msgs::ErrorAction::SendWarningMessage {
-                                                               msg: msgs::WarningMessage {
-                                                                       channel_id: msg.channel_id,
-                                                                       data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
-                                                               },
-                                                               log_level: Level::Trace,
+                                       PersistenceNotifierGuard::optionally_notify(
+                                               self,
+                                               || -> NotifyOption {
+                                                       let per_peer_state = self.per_peer_state.read().unwrap();
+                                                       let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+                                                       if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+                                                       let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+                                                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+                                                               if let Some(msg) = chan.get_outbound_shutdown() {
+                                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                                               node_id: *counterparty_node_id,
+                                                                               msg,
+                                                                       });
+                                                               }
+                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                                       node_id: *counterparty_node_id,
+                                                                       action: msgs::ErrorAction::SendWarningMessage {
+                                                                               msg: msgs::WarningMessage {
+                                                                                       channel_id: msg.channel_id,
+                                                                                       data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+                                                                               },
+                                                                               log_level: Level::Trace,
+                                                                       }
+                                                               });
+                                                               // This can happen in a fairly tight loop, so we absolutely cannot trigger
+                                                               // a `ChannelManager` write here.
+                                                               return NotifyOption::SkipPersistHandleEvents;
                                                        }
-                                               });
-                                       }
+                                                       NotifyOption::SkipPersistNoEvents
+                                               }
+                                       );
                                }
                                return;
                        }
                        _ => {}
                }
 
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
                if msg.channel_id.is_zero() {
                        let channel_ids: Vec<ChannelId> = {
                                let per_peer_state = self.per_peer_state.read().unwrap();
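Editor's note: the rewrite above routes the LND-bug-6039 path through `optionally_notify` so a tight loop of error messages cannot force a `ChannelManager` write on every iteration. A toy model of the three-way notify decision; the names mirror, but are not, LDK's `PersistenceNotifierGuard` API.

```rust
// Toy model of the notify decision; illustrative only.
enum NotifyOption { DoPersist, SkipPersistHandleEvents, SkipPersistNoEvents }

// Returns (persist, wake_event_processor) for what a guard would do on drop.
fn optionally_notify<F: FnOnce() -> NotifyOption>(f: F) -> (bool, bool) {
    match f() {
        NotifyOption::DoPersist => (true, true),
        NotifyOption::SkipPersistHandleEvents => (false, true),
        NotifyOption::SkipPersistNoEvents => (false, false),
    }
}

fn main() {
    // Queuing the warning message wakes the event processor but, crucially,
    // does not force a ChannelManager write on every loop iteration.
    assert_eq!(optionally_notify(|| NotifyOption::SkipPersistHandleEvents), (false, true));
    assert_eq!(optionally_notify(|| NotifyOption::DoPersist), (true, true));
    assert_eq!(optionally_notify(|| NotifyOption::SkipPersistNoEvents), (false, false));
}
```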
@@ -9312,7 +10151,7 @@ where
                                                        return;
                                                }
                                        },
-                                       #[cfg(dual_funding)]
+                                       #[cfg(any(dual_funding, splicing))]
                                        Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
                                                if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
                                                        peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
@@ -9323,7 +10162,7 @@ where
                                                }
                                        },
                                        None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::Funded(_)) => (),
-                                       #[cfg(dual_funding)]
+                                       #[cfg(any(dual_funding, splicing))]
                                        Some(ChannelPhase::UnfundedInboundV2(_)) => (),
                                }
                        }
@@ -9412,23 +10251,27 @@ where
        R::Target: Router,
        L::Target: Logger,
 {
-       fn handle_message(&self, message: OffersMessage) -> Option<OffersMessage> {
+       fn handle_message(&self, message: OffersMessage, responder: Option<Responder>) -> ResponseInstruction<OffersMessage> {
                let secp_ctx = &self.secp_ctx;
                let expanded_key = &self.inbound_payment_key;
 
                match message {
                        OffersMessage::InvoiceRequest(invoice_request) => {
+                               let responder = match responder {
+                                       Some(responder) => responder,
+                                       None => return ResponseInstruction::NoResponse,
+                               };
                                let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
                                        &invoice_request
                                ) {
                                        Ok(amount_msats) => amount_msats,
-                                       Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
+                                       Err(error) => return responder.respond(OffersMessage::InvoiceError(error.into())),
                                };
                                let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
                                        Ok(invoice_request) => invoice_request,
                                        Err(()) => {
                                                let error = Bolt12SemanticError::InvalidMetadata;
-                                               return Some(OffersMessage::InvoiceError(error.into()));
+                                               return responder.respond(OffersMessage::InvoiceError(error.into()));
                                        },
                                };
 
@@ -9439,17 +10282,21 @@ where
                                        Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
                                        Err(()) => {
                                                let error = Bolt12SemanticError::InvalidAmount;
-                                               return Some(OffersMessage::InvoiceError(error.into()));
+                                               return responder.respond(OffersMessage::InvoiceError(error.into()));
                                        },
                                };
 
+                               let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+                                       offer_id: invoice_request.offer_id,
+                                       invoice_request: invoice_request.fields(),
+                               });
                                let payment_paths = match self.create_blinded_payment_paths(
-                                       amount_msats, payment_secret
+                                       amount_msats, payment_secret, payment_context
                                ) {
                                        Ok(payment_paths) => payment_paths,
                                        Err(()) => {
                                                let error = Bolt12SemanticError::MissingPaths;
-                                               return Some(OffersMessage::InvoiceError(error.into()));
+                                               return responder.respond(OffersMessage::InvoiceError(error.into()));
                                        },
                                };
 
@@ -9458,7 +10305,7 @@ where
                                        self.highest_seen_timestamp.load(Ordering::Acquire) as u64
                                );
 
-                               if invoice_request.keys.is_some() {
+                               let response = if invoice_request.keys.is_some() {
                                        #[cfg(feature = "std")]
                                        let builder = invoice_request.respond_using_derived_keys(
                                                payment_paths, payment_hash
@@ -9467,12 +10314,10 @@ where
                                        let builder = invoice_request.respond_using_derived_keys_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
-                                       let builder: Result<InvoiceBuilder<DerivedSigningPubkey>, _> =
-                                               builder.map(|b| b.into());
-                                       match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
-                                               Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
-                                               Err(error) => Some(OffersMessage::InvoiceError(error.into())),
-                                       }
+                                       builder
+                                               .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
+                                               .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
+                                               .map_err(InvoiceError::from)
                                } else {
                                        #[cfg(feature = "std")]
                                        let builder = invoice_request.respond_with(payment_paths, payment_hash);
@@ -9480,52 +10325,72 @@ where
                                        let builder = invoice_request.respond_with_no_std(
                                                payment_paths, payment_hash, created_at
                                        );
-                                       let builder: Result<InvoiceBuilder<ExplicitSigningPubkey>, _> =
-                                               builder.map(|b| b.into());
-                                       let response = builder.and_then(|builder| builder.allow_mpp().build())
-                                               .map_err(|e| OffersMessage::InvoiceError(e.into()))
+                                       builder
+                                               .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
+                                               .and_then(|builder| builder.allow_mpp().build())
+                                               .map_err(InvoiceError::from)
                                                .and_then(|invoice| {
                                                        #[cfg(c_bindings)]
                                                        let mut invoice = invoice;
-                                                       match invoice.sign(|invoice: &UnsignedBolt12Invoice|
-                                                               self.node_signer.sign_bolt12_invoice(invoice)
-                                                       ) {
-                                                               Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
-                                                               Err(SignError::Signing) => Err(OffersMessage::InvoiceError(
-                                                                               InvoiceError::from_string("Failed signing invoice".to_string())
-                                                               )),
-                                                               Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-                                                                               InvoiceError::from_string("Failed invoice signature verification".to_string())
-                                                               )),
-                                                       }
-                                               });
-                                       match response {
-                                               Ok(invoice) => Some(invoice),
-                                               Err(error) => Some(error),
-                                       }
+                                                       invoice
+                                                               .sign(|invoice: &UnsignedBolt12Invoice|
+                                                                       self.node_signer.sign_bolt12_invoice(invoice)
+                                                               )
+                                                               .map_err(InvoiceError::from)
+                                               })
+                               };
+
+                               match response {
+                                       Ok(invoice) => responder.respond(OffersMessage::Invoice(invoice)),
+                                       Err(error) => responder.respond(OffersMessage::InvoiceError(error.into())),
                                }
                        },
                        OffersMessage::Invoice(invoice) => {
-                               match invoice.verify(expanded_key, secp_ctx) {
-                                       Err(()) => {
-                                               Some(OffersMessage::InvoiceError(InvoiceError::from_string("Unrecognized invoice".to_owned())))
-                                       },
-                                       Ok(_) if invoice.invoice_features().requires_unknown_bits_from(&self.bolt12_invoice_features()) => {
-                                               Some(OffersMessage::InvoiceError(Bolt12SemanticError::UnknownRequiredFeatures.into()))
-                                       },
+                               let result = match invoice.verify(expanded_key, secp_ctx) {
                                        Ok(payment_id) => {
-                                               if let Err(e) = self.send_payment_for_bolt12_invoice(&invoice, payment_id) {
-                                                       log_trace!(self.logger, "Failed paying invoice: {:?}", e);
-                                                       Some(OffersMessage::InvoiceError(InvoiceError::from_string(format!("{:?}", e))))
+                                               let features = self.bolt12_invoice_features();
+                                               if invoice.invoice_features().requires_unknown_bits_from(&features) {
+                                                       Err(InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures))
+                                               } else if self.default_configuration.manually_handle_bolt12_invoices {
+                                                       let event = Event::InvoiceReceived { payment_id, invoice, responder };
+                                                       self.pending_events.lock().unwrap().push_back((event, None));
+                                                       return ResponseInstruction::NoResponse;
                                                } else {
-                                                       None
+                                                       self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id)
+                                                               .map_err(|e| {
+                                                                       log_trace!(self.logger, "Failed paying invoice: {:?}", e);
+                                                                       InvoiceError::from_string(format!("{:?}", e))
+                                                               })
                                                }
                                        },
+                                       Err(()) => Err(InvoiceError::from_string("Unrecognized invoice".to_owned())),
+                               };
+
+                               match result {
+                                       Ok(()) => ResponseInstruction::NoResponse,
+                                       Err(e) => match responder {
+                                               Some(responder) => responder.respond(OffersMessage::InvoiceError(e)),
+                                               None => {
+                                                       log_trace!(self.logger, "No reply path for sending invoice error: {:?}", e);
+                                                       ResponseInstruction::NoResponse
+                                               },
+                                       },
+                               }
+                       },
+                       #[cfg(async_payments)]
+                       OffersMessage::StaticInvoice(_invoice) => {
+                               match responder {
+                                       Some(responder) => {
+                                               responder.respond(OffersMessage::InvoiceError(
+                                                               InvoiceError::from_string("Static invoices not yet supported".to_string())
+                                               ))
+                                       },
+                                       None => return ResponseInstruction::NoResponse,
                                }
                        },
                        OffersMessage::InvoiceError(invoice_error) => {
                                log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
-                               None
+                               ResponseInstruction::NoResponse
                        },
                }
        }
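Editor's note: the new `Responder`/`ResponseInstruction` flow replaces returning `Option<OffersMessage>`: a handler either consumes the responder to reply or explicitly declines. A minimal toy restatement; the types are illustrative stand-ins, not LDK's onion-message API.

```rust
// Illustrative stand-ins, not LDK's onion_message API.
enum ResponseInstruction<T> { NoResponse, Respond(T) }

struct Responder;

impl Responder {
    fn respond<T>(self, msg: T) -> ResponseInstruction<T> {
        ResponseInstruction::Respond(msg)
    }
}

// A handler only gets a `Responder` when the incoming message had a reply path.
fn handle(msg: &str, responder: Option<Responder>) -> ResponseInstruction<String> {
    let responder = match responder {
        Some(r) => r,
        // No reply path: nowhere to send an answer, so decline explicitly.
        None => return ResponseInstruction::NoResponse,
    };
    responder.respond(format!("ack: {msg}"))
}

fn main() {
    assert!(matches!(handle("ping", Some(Responder)), ResponseInstruction::Respond(_)));
    assert!(matches!(handle("ping", None), ResponseInstruction::NoResponse));
}
```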
@@ -9535,6 +10400,23 @@ where
        }
 }
 
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+       T::Target: BroadcasterInterface,
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+       SP::Target: SignerProvider,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
+{
+       fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
+               self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
+       }
+}
+
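Editor's note: `next_node_id` is a plain map lookup keyed by short channel id. A toy equivalent over a `HashMap`, with a byte array standing in for `PublicKey`:

```rust
use std::collections::HashMap;

// Toy lookup table: short channel id -> (counterparty node id, channel id),
// with a byte array standing in for secp256k1's PublicKey.
fn next_node_id(
    short_to_chan_info: &HashMap<u64, ([u8; 33], u64)>, short_channel_id: u64,
) -> Option<[u8; 33]> {
    short_to_chan_info.get(&short_channel_id).map(|(pubkey, _)| *pubkey)
}

fn main() {
    let mut map = HashMap::new();
    map.insert(777u64, ([2u8; 33], 1u64));
    assert_eq!(next_node_id(&map, 777), Some([2u8; 33]));
    assert_eq!(next_node_id(&map, 778), None);
}
```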
 /// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
 /// [`ChannelManager`].
 pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
@@ -9599,140 +10481,6 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 const SERIALIZATION_VERSION: u8 = 1;
 const MIN_SERIALIZATION_VERSION: u8 = 1;
 
-impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
-       (2, fee_base_msat, required),
-       (4, fee_proportional_millionths, required),
-       (6, cltv_expiry_delta, required),
-});
-
-impl_writeable_tlv_based!(ChannelCounterparty, {
-       (2, node_id, required),
-       (4, features, required),
-       (6, unspendable_punishment_reserve, required),
-       (8, forwarding_info, option),
-       (9, outbound_htlc_minimum_msat, option),
-       (11, outbound_htlc_maximum_msat, option),
-});
-
-impl Writeable for ChannelDetails {
-       fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
-               // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
-               let user_channel_id_low = self.user_channel_id as u64;
-               let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
-               write_tlv_fields!(writer, {
-                       (1, self.inbound_scid_alias, option),
-                       (2, self.channel_id, required),
-                       (3, self.channel_type, option),
-                       (4, self.counterparty, required),
-                       (5, self.outbound_scid_alias, option),
-                       (6, self.funding_txo, option),
-                       (7, self.config, option),
-                       (8, self.short_channel_id, option),
-                       (9, self.confirmations, option),
-                       (10, self.channel_value_satoshis, required),
-                       (12, self.unspendable_punishment_reserve, option),
-                       (14, user_channel_id_low, required),
-                       (16, self.balance_msat, required),
-                       (18, self.outbound_capacity_msat, required),
-                       (19, self.next_outbound_htlc_limit_msat, required),
-                       (20, self.inbound_capacity_msat, required),
-                       (21, self.next_outbound_htlc_minimum_msat, required),
-                       (22, self.confirmations_required, option),
-                       (24, self.force_close_spend_delay, option),
-                       (26, self.is_outbound, required),
-                       (28, self.is_channel_ready, required),
-                       (30, self.is_usable, required),
-                       (32, self.is_public, required),
-                       (33, self.inbound_htlc_minimum_msat, option),
-                       (35, self.inbound_htlc_maximum_msat, option),
-                       (37, user_channel_id_high_opt, option),
-                       (39, self.feerate_sat_per_1000_weight, option),
-                       (41, self.channel_shutdown_state, option),
-                       (43, self.pending_inbound_htlcs, optional_vec),
-                       (45, self.pending_outbound_htlcs, optional_vec),
-               });
-               Ok(())
-       }
-}
-
-impl Readable for ChannelDetails {
-       fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
-               _init_and_read_len_prefixed_tlv_fields!(reader, {
-                       (1, inbound_scid_alias, option),
-                       (2, channel_id, required),
-                       (3, channel_type, option),
-                       (4, counterparty, required),
-                       (5, outbound_scid_alias, option),
-                       (6, funding_txo, option),
-                       (7, config, option),
-                       (8, short_channel_id, option),
-                       (9, confirmations, option),
-                       (10, channel_value_satoshis, required),
-                       (12, unspendable_punishment_reserve, option),
-                       (14, user_channel_id_low, required),
-                       (16, balance_msat, required),
-                       (18, outbound_capacity_msat, required),
-                       // Note that by the time we get past the required read above, outbound_capacity_msat will be
-                       // filled in, so we can safely unwrap it here.
-                       (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
-                       (20, inbound_capacity_msat, required),
-                       (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
-                       (22, confirmations_required, option),
-                       (24, force_close_spend_delay, option),
-                       (26, is_outbound, required),
-                       (28, is_channel_ready, required),
-                       (30, is_usable, required),
-                       (32, is_public, required),
-                       (33, inbound_htlc_minimum_msat, option),
-                       (35, inbound_htlc_maximum_msat, option),
-                       (37, user_channel_id_high_opt, option),
-                       (39, feerate_sat_per_1000_weight, option),
-                       (41, channel_shutdown_state, option),
-                       (43, pending_inbound_htlcs, optional_vec),
-                       (45, pending_outbound_htlcs, optional_vec),
-               });
-
-               // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
-               // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
-               let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
-               let user_channel_id = user_channel_id_low as u128 +
-                       ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
-
-               Ok(Self {
-                       inbound_scid_alias,
-                       channel_id: channel_id.0.unwrap(),
-                       channel_type,
-                       counterparty: counterparty.0.unwrap(),
-                       outbound_scid_alias,
-                       funding_txo,
-                       config,
-                       short_channel_id,
-                       channel_value_satoshis: channel_value_satoshis.0.unwrap(),
-                       unspendable_punishment_reserve,
-                       user_channel_id,
-                       balance_msat: balance_msat.0.unwrap(),
-                       outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
-                       next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
-                       next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
-                       inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
-                       confirmations_required,
-                       confirmations,
-                       force_close_spend_delay,
-                       is_outbound: is_outbound.0.unwrap(),
-                       is_channel_ready: is_channel_ready.0.unwrap(),
-                       is_usable: is_usable.0.unwrap(),
-                       is_public: is_public.0.unwrap(),
-                       inbound_htlc_minimum_msat,
-                       inbound_htlc_maximum_msat,
-                       feerate_sat_per_1000_weight,
-                       channel_shutdown_state,
-                       pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
-                       pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
-               })
-       }
-}
-
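// The `ChannelDetails` (de)serialization impls removed above presumably move
// with the type to its new module rather than being dropped. Their one
// subtlety merits a standalone sketch: to stay readable by pre-0.0.113
// versions, the u128 `user_channel_id` is written as two u64 TLVs and
// rejoined on read. The `+` in the removed reader is equivalent to the `|`
// below, since the low half occupies only the lower 64 bits.
fn split_user_channel_id(user_channel_id: u128) -> (u64, Option<u64>) {
	// Low 64 bits go in the legacy required field, high 64 bits in an odd
	// (optional) TLV that old readers simply ignore.
	(user_channel_id as u64, Some((user_channel_id >> 64) as u64))
}

fn rejoin_user_channel_id(low: u64, high_opt: Option<u64>) -> u128 {
	(low as u128) | ((high_opt.unwrap_or(0) as u128) << 64)
}
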
 impl_writeable_tlv_based!(PhantomRouteHints, {
        (2, channels, required_vec),
        (4, phantom_scid, required),
@@ -9757,9 +10505,11 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
                (3, payment_metadata, option),
                (5, custom_tlvs, optional_vec),
                (7, requires_blinded_error, (default_value, false)),
+               (9, payment_context, option),
        },
        (2, ReceiveKeysend) => {
                (0, payment_preimage, required),
+               (1, requires_blinded_error, (default_value, false)),
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
                (4, payment_data, option), // Added in 0.0.116
@@ -9871,7 +10621,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                let (payment_data, keysend_preimage) = match &self.onion_payload {
-                       OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+                       OnionPayload::Invoice { _legacy_hop_data } => {
+                               (_legacy_hop_data.as_ref(), None)
+                       },
                        OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
                };
                write_tlv_fields!(writer, {
@@ -10116,9 +10868,10 @@ where
                        best_block.block_hash.write(writer)?;
                }
 
+		// Take the per-peer-state write lock up front; holding it for the rest
+		// of serialization presumably pins the peer set, so peers and channels
+		// cannot be added or removed between the counting pass below and the
+		// later writes.
+		let per_peer_state = self.per_peer_state.write().unwrap();
+
                let mut serializable_peer_count: u64 = 0;
                {
-                       let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut number_of_funded_channels = 0;
                        for (_, peer_state_mutex) in per_peer_state.iter() {
                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -10159,7 +10912,11 @@ where
                        }
                }
 
-               let per_peer_state = self.per_peer_state.write().unwrap();
+               let mut decode_update_add_htlcs_opt = None;
+               let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
+               if !decode_update_add_htlcs.is_empty() {
+                       decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
+               }
 
                let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
                let claimable_payments = self.claimable_payments.lock().unwrap();
@@ -10310,6 +11067,7 @@ where
                        (10, in_flight_monitor_updates, option),
                        (11, self.probing_cookie_secret, required),
                        (13, htlc_onion_fields, optional_vec),
+                       (14, decode_update_add_htlcs_opt, option),
                });
 
                Ok(())
@@ -10357,14 +11115,6 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
        }
 }
 
-impl_writeable_tlv_based_enum!(ChannelShutdownState,
-       (0, NotShuttingDown) => {},
-       (2, ShutdownInitiated) => {},
-       (4, ResolvingHTLCs) => {},
-       (6, NegotiatingClosingFee) => {},
-       (8, ShutdownComplete) => {}, ;
-);
-
 /// Arguments for the creation of a ChannelManager that are not deserialized.
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
@@ -10540,7 +11290,7 @@ where
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
-                       let logger = WithChannelContext::from(&args.logger, &channel.context);
+                       let logger = WithChannelContext::from(&args.logger, &channel.context, None);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
                        funding_txo_set.insert(funding_txo.clone());
@@ -10599,6 +11349,7 @@ where
                                                        // claim update ChannelMonitor updates were persisted prior to persisting
                                                        // the ChannelMonitor update for the forward leg, so attempting to fail the
                                                        // backwards leg of the HTLC will simply be rejected.
+                                                       let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
                                                        log_info!(logger,
                                                                "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
                                                                &channel.context.channel_id(), &payment_hash);
@@ -10606,9 +11357,10 @@ where
                                                }
                                        }
                                } else {
-                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+                                       channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
+                                       log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
                                                &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
-                                               monitor.get_latest_update_id());
+                                               monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
@@ -10652,7 +11404,7 @@ where
 
                for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
-                               let logger = WithChannelMonitor::from(&args.logger, monitor);
+                               let logger = WithChannelMonitor::from(&args.logger, monitor, None);
                                let channel_id = monitor.channel_id();
                                log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
                                        &channel_id);
@@ -10775,6 +11527,7 @@ where
                let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
                let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
+               let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (2, pending_intercepted_htlcs, option),
@@ -10788,7 +11541,9 @@ where
                        (10, in_flight_monitor_updates, option),
                        (11, probing_cookie_secret, option),
                        (13, claimable_htlc_onion_fields, optional_vec),
+                       (14, decode_update_add_htlcs, option),
                });
+               let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
                }
@@ -10872,7 +11627,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for phase in peer_state.channel_by_id.values() {
                                if let ChannelPhase::Funded(chan) = phase {
-                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context, None);
 
                                        // Channels that were persisted have to be funded, otherwise they should have been
                                        // discarded.
@@ -10888,7 +11643,7 @@ where
                                                }
                                        }
                                        if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
-                                               // If the channel is ahead of the monitor, return InvalidValue:
+                                               // If the channel is ahead of the monitor, return DangerousValue:
                                                log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
                                                log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
                                                        chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
@@ -10897,7 +11652,7 @@ where
                                                log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                                log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                                log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
-                                               return Err(DecodeError::InvalidValue);
+                                               return Err(DecodeError::DangerousValue);
                                        }
                                } else {
                                        // We shouldn't have persisted (or read) any unfunded channel types so none should have been
@@ -10911,7 +11666,7 @@ where
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
                                let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
-                               let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
+                               let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
@@ -10930,6 +11685,7 @@ where
                                        log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
                                        log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+                                       log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
                                        return Err(DecodeError::InvalidValue);
                                }
                        }
@@ -10955,8 +11711,8 @@ where
                        for (_, monitor) in args.channel_monitors.iter() {
                                let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
                                if counterparty_opt.is_none() {
-                                       let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
+                                               let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
                                                        if path.hops.is_empty() {
                                                                log_error!(logger, "Got an empty path for a pending payment");
@@ -10997,6 +11753,7 @@ where
                                                }
                                        }
                                        for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+                                               let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
                                                match htlc_source {
                                                        HTLCSource::PreviousHopData(prev_hop_data) => {
                                                                let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
@@ -11008,6 +11765,18 @@ where
                                                                // still have an entry for this HTLC in `forward_htlcs` or
                                                                // `pending_intercepted_htlcs`, we were apparently not persisted after
                                                                // the monitor was when forwarding the payment.
+                                                               decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
+                                                                       update_add_htlcs.retain(|update_add_htlc| {
+                                                                               let matches = *scid == prev_hop_data.short_channel_id &&
+                                                                                       update_add_htlc.htlc_id == prev_hop_data.htlc_id;
+                                                                               if matches {
+                                                                                       log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
+                                                                                               &htlc.payment_hash, &monitor.channel_id());
+                                                                               }
+                                                                               !matches
+                                                                       });
+                                                                       !update_add_htlcs.is_empty()
+                                                               });
                                                                forward_htlcs.retain(|_, forwards| {
                                                                        forwards.retain(|forward| {
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
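
use std::collections::HashMap;

// A self-contained sketch of the nested-retain pruning added above, assuming
// toy types (scid -> pending htlc_ids): the inner `retain` drops any
// pending-to-decode HTLC matching the closed channel's (scid, htlc_id) pair,
// and the outer `retain` then drops map entries whose Vec became empty so
// stale keys do not accumulate.
fn prune_pending_decode(map: &mut HashMap<u64, Vec<u64>>, scid: u64, htlc_id: u64) {
	map.retain(|map_scid, htlc_ids| {
		htlc_ids.retain(|id| !(*map_scid == scid && *id == htlc_id));
		!htlc_ids.is_empty()
	});
}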
@@ -11089,7 +11858,7 @@ where
                        }
                }
 
-               if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
+               if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
                        // If we have pending HTLCs to forward, assume we either dropped a
                        // `PendingHTLCsForwardable` or the user received it but never processed it as they
                        // shut down before the timer hit. Either way, set the time_forwardable to a small
@@ -11138,7 +11907,7 @@ where
                                let purpose = match &htlcs[0].onion_payload {
                                        OnionPayload::Invoice { _legacy_hop_data } => {
                                                if let Some(hop_data) = _legacy_hop_data {
-                                                       events::PaymentPurpose::InvoicePayment {
+                                                       events::PaymentPurpose::Bolt11InvoicePayment {
                                                                payment_preimage: match pending_inbound_payments.get(&payment_hash) {
                                                                        Some(inbound_payment) => inbound_payment.payment_preimage,
                                                                        None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
@@ -11182,7 +11951,7 @@ where
                        let peer_state = &mut *peer_state_lock;
                        for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
                                if let ChannelPhase::Funded(chan) = phase {
-                                       let logger = WithChannelContext::from(&args.logger, &chan.context);
+                                       let logger = WithChannelContext::from(&args.logger, &chan.context, None);
                                        if chan.context.outbound_scid_alias() == 0 {
                                                let mut outbound_scid_alias;
                                                loop {
@@ -11252,7 +12021,7 @@ where
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
-                                                               let logger = WithChannelContext::from(&args.logger, &channel.context);
+                                                               let logger = WithChannelContext::from(&args.logger, &channel.context, Some(payment_hash));
                                                                channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger);
                                                        }
                                                }
@@ -11267,6 +12036,7 @@ where
                                                amount_msat: claimable_amt_msat,
                                                htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
                                                sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
+                                               onion_fields: payment.onion_fields,
                                        }, None));
                                }
                        }
@@ -11275,7 +12045,7 @@ where
                for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
                        if let Some(peer_state) = per_peer_state.get(&node_id) {
                                for (channel_id, actions) in monitor_update_blocked_actions.iter() {
-                                       let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id));
+                                       let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
@@ -11303,7 +12073,7 @@ where
                                }
                                peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
                        } else {
-                               log_error!(WithContext::from(&args.logger, Some(node_id), None), "Got blocked actions without a per-peer-state for {}", node_id);
+                               log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id);
                                return Err(DecodeError::InvalidValue);
                        }
                }
@@ -11323,6 +12093,7 @@ where
                        pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
 
                        forward_htlcs: Mutex::new(forward_htlcs),
+                       decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
                        outpoint_to_peer: Mutex::new(outpoint_to_peer),
@@ -11351,10 +12122,14 @@ where
 
                        pending_offers_messages: Mutex::new(Vec::new()),
 
+                       pending_broadcast_messages: Mutex::new(Vec::new()),
+
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
                        signer_provider: args.signer_provider,
 
+                       last_days_feerates: Mutex::new(VecDeque::new()),
+
                        logger: args.logger,
                        default_configuration: args.default_config,
                };
@@ -11371,7 +12146,9 @@ where
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
                        channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
-                               downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
+                               downstream_closed, true, downstream_node_id, downstream_funding,
+                               downstream_channel_id, None
+                       );
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -11388,8 +12165,7 @@ mod tests {
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
        use core::sync::atomic::Ordering;
        use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-       use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use crate::ln::ChannelId;
+       use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
        use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
        use crate::ln::functional_test_utils::*;
        use crate::ln::msgs::{self, ErrorAction};
@@ -11880,6 +12656,61 @@ mod tests {
                }
        }
 
+       #[test]
+       fn test_channel_update_cached() {
+               let chanmon_cfgs = create_chanmon_cfgs(3);
+               let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+               let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
+
+				// Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
+               let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_1_events.len(), 0);
+
+               {
+			// Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages.
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+                       assert_eq!(pending_broadcast_messages.len(), 1);
+               }
+
+               // Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+               nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+
+               let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_0_events.len(), 0);
+
+               // Now we reconnect to a peer
+               nodes[0].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[2].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               nodes[2].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
+
+               // Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
+               let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(node_0_events.len(), 1);
+               match &node_0_events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => (),
+                       _ => panic!("Unexpected event"),
+               }
+               {
+			// Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages.
+			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
+                       assert_eq!(pending_broadcast_messages.len(), 0);
+               }
+       }
+
        #[test]
        fn test_drop_disconnected_peers_when_removing_channels() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11891,11 +12722,11 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-
-               nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+               let error_message = "Channel force-closed";
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
 
                {
                        // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
@@ -12109,6 +12940,7 @@ mod tests {
                let channel_id = ChannelId::from_bytes([4; 32]);
                let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
                let intercept_id = InterceptId([0; 32]);
+               let error_message = "Channel force-closed";
 
                // Test the API functions.
                check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
@@ -12117,9 +12949,9 @@ mod tests {
 
                check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
 
-               check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key), unkown_public_key);
+               check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
 
-               check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key), unkown_public_key);
+               check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
 
                check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
 
@@ -12141,15 +12973,16 @@ mod tests {
 
                // Dummy values
                let channel_id = ChannelId::from_bytes([4; 32]);
+               let error_message = "Channel force-closed";
 
                // Test the API functions.
                check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
 
                check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
 
-               check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+               check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
 
-               check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
+               check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
 
                check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
 
@@ -12503,6 +13336,7 @@ mod tests {
                anchors_config.manually_accept_inbound_channels = true;
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+               let error_message = "Channel force-closed";
 
                nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
                let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -12512,7 +13346,7 @@ mod tests {
                let events = nodes[1].node.get_and_clear_pending_events();
                match events[0] {
                        Event::OpenChannelRequest { temporary_channel_id, .. } => {
-                               nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+                               nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
                        }
                        _ => panic!("Unexpected event"),
                }
@@ -12620,15 +13454,16 @@ mod tests {
                let user_config = test_default_channel_config();
                let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
                let nodes = create_network(2, &node_cfg, &node_chanmgr);
+               let error_message = "Channel force-closed";
 
                // Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
                let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
                check_closed_broadcast(&nodes[0], 1, true);
                check_added_monitors(&nodes[0], 1);
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
                {
                        let txn = nodes[0].tx_broadcaster.txn_broadcast();
                        assert_eq!(txn.len(), 1);
@@ -12746,10 +13581,12 @@ pub mod bench {
        use crate::util::test_utils;
        use crate::util::config::{UserConfig, MaxDustHTLCExposure};
 
+       use bitcoin::amount::Amount;
        use bitcoin::blockdata::locktime::absolute::LockTime;
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::{Transaction, TxOut};
+       use bitcoin::transaction::Version;
 
        use crate::sync::{Arc, Mutex, RwLock};
 
@@ -12826,8 +13663,8 @@ pub mod bench {
 
                let tx;
                if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
-                       tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
-                               value: 8_000_000, script_pubkey: output_script,
+                       tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
+                               value: Amount::from_sat(8_000_000), script_pubkey: output_script,
                        }]};
                        node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
                } else { panic!(); }
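
// The two bench changes above track a rust-bitcoin API shift: `Transaction::version`
// and `TxOut::value` are now newtype wrappers (`Version`, `Amount`) rather than bare
// integers. A minimal construction under that API, assuming only the imports this
// module already pulls in plus a fully-qualified `ScriptBuf`:
fn funding_tx_skeleton(output_script: bitcoin::ScriptBuf) -> Transaction {
	Transaction {
		version: Version::TWO, // previously the bare integer `2`
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![TxOut {
			value: Amount::from_sat(8_000_000), // previously a bare u64 of sats
			script_pubkey: output_script,
		}],
	}
}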