Rework get_latest_holder_commitment_txn to broadcast for users
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 9536a9366e18506fa586d96c325b7d725e677de2..ea5c1bb88187131a0262e044ed3c89383a179cf7 100644
@@ -43,14 +43,13 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+use crate::ln::channel::{self, Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext};
+pub use crate::ln::channel::{InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
 use crate::ln::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::Bolt11InvoiceFeatures;
-use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{BlindedTail, DefaultRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
-use crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters};
-use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundOnionErr, NextPacketDetails};
+use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
+use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
 use crate::ln::msgs;
 use crate::ln::onion_utils;
 use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
@@ -65,8 +64,9 @@ use crate::offers::merkle::SignError;
 use crate::offers::offer::{DerivedMetadata, Offer, OfferBuilder};
 use crate::offers::parse::Bolt12SemanticError;
 use crate::offers::refund::{Refund, RefundBuilder};
-use crate::onion_message::{Destination, OffersMessage, OffersMessageHandler, PendingOnionMessage, new_pending_onion_message};
-use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider};
+use crate::onion_message::messenger::{Destination, MessageRouter, PendingOnionMessage, new_pending_onion_message};
+use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
+use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
@@ -75,6 +75,13 @@ use crate::util::string::UntrustedString;
 use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use crate::util::logger::{Level, Logger, WithContext};
 use crate::util::errors::APIError;
+#[cfg(not(c_bindings))]
+use {
+       crate::routing::router::DefaultRouter,
+       crate::routing::gossip::NetworkGraph,
+       crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
+       crate::sign::KeysManager,
+};
 
 use alloc::collections::{btree_map, BTreeMap};
 
@@ -196,15 +203,16 @@ pub struct BlindedForward {
        /// onion payload if we're the introduction node. Useful for calculating the next hop's
        /// [`msgs::UpdateAddHTLC::blinding_point`].
        pub inbound_blinding_point: PublicKey,
-       // Another field will be added here when we support forwarding as a non-intro node.
+       /// If needed, this determines how this HTLC should be failed backwards, based on whether we are
+       /// the introduction node.
+       pub failure: BlindedFailure,
 }
 
 impl PendingHTLCRouting {
        // Used to override the onion failure code and data if the HTLC is blinded.
        fn blinded_failure(&self) -> Option<BlindedFailure> {
-               // TODO: needs update when we support forwarding blinded HTLCs as non-intro node
                match self {
-                       Self::Forward { blinded: Some(_), .. } => Some(BlindedFailure::FromIntroductionNode),
+                       Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
                        Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
                        _ => None,
                }
@@ -281,6 +289,7 @@ pub(super) struct PendingAddHTLCInfo {
        // Note that this may be an outbound SCID alias for the associated channel.
        prev_short_channel_id: u64,
        prev_htlc_id: u64,
+       prev_channel_id: ChannelId,
        prev_funding_outpoint: OutPoint,
        prev_user_channel_id: u128,
 }
@@ -299,10 +308,15 @@ pub(super) enum HTLCForwardInfo {
        },
 }
 
-// Used for failing blinded HTLCs backwards correctly.
+/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
+/// which determines the failure message that should be used.
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
-enum BlindedFailure {
+pub enum BlindedFailure {
+       /// This HTLC is being failed backwards by the introduction node, and thus should be failed with
+       /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
        FromIntroductionNode,
+       /// This HTLC is being failed backwards by a blinded node within the path, and thus should be
+       /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
        FromBlindedNode,
 }
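
As the new doc comments note, both variants carry the BOLT 4 `invalid_onion_blinding` error code; they differ only in which wire message returns the failure. An illustrative sketch of that mapping (not the patch's code):

```rust
// Illustrative only: both variants use invalid_onion_blinding (0x8000|0x4000|24),
// differing in the wire message that carries the failure back.
const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24;

fn blinded_failure_msg(failure: BlindedFailure) -> (&'static str, u16) {
    match failure {
        // The introduction node may reply with a normal encrypted onion error.
        BlindedFailure::FromIntroductionNode => ("update_fail_htlc", INVALID_ONION_BLINDING),
        // Nodes inside the blinded path must not produce identifiable onion
        // errors, so they fail with a malformed-HTLC message instead.
        BlindedFailure::FromBlindedNode => ("update_fail_malformed_htlc", INVALID_ONION_BLINDING),
    }
}
```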
 
@@ -316,6 +330,7 @@ pub(crate) struct HTLCPreviousHopData {
        incoming_packet_shared_secret: [u8; 32],
        phantom_shared_secret: Option<[u8; 32]>,
        blinded_failure: Option<BlindedFailure>,
+       channel_id: ChannelId,
 
        // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
        // channel with a preimage provided by the forward channel.
@@ -356,7 +371,7 @@ struct ClaimableHTLC {
 impl From<&ClaimableHTLC> for events::ClaimedHTLC {
        fn from(val: &ClaimableHTLC) -> Self {
                events::ClaimedHTLC {
-                       channel_id: val.prev_hop.outpoint.to_channel_id(),
+                       channel_id: val.prev_hop.channel_id,
                        user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
                        cltv_expiry: val.cltv_expiry,
                        value_msat: val.value,
@@ -545,9 +560,8 @@ impl Into<u16> for FailureCode {
 
 struct MsgHandleErrInternal {
        err: msgs::LightningError,
-       chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed
+       closes_channel: bool,
        shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
-       channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
        #[inline]
@@ -562,17 +576,16 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
        #[inline]
        fn from_no_close(err: msgs::LightningError) -> Self {
-               Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
+               Self { err, closes_channel: false, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
                let action = if shutdown_res.monitor_update.is_some() {
                        // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
@@ -584,9 +597,8 @@ impl MsgHandleErrInternal {
                };
                Self {
                        err: LightningError { err, action },
-                       chan_id: Some((channel_id, user_channel_id)),
+                       closes_channel: true,
                        shutdown_finish: Some((shutdown_res, channel_update)),
-                       channel_capacity: Some(channel_capacity)
                }
        }
        #[inline]
@@ -617,14 +629,13 @@ impl MsgHandleErrInternal {
                                        },
                                },
                        },
-                       chan_id: None,
+                       closes_channel: false,
                        shutdown_finish: None,
-                       channel_capacity: None,
                }
        }
 
        fn closes_channel(&self) -> bool {
-               self.chan_id.is_some()
+               self.closes_channel
        }
 }
 
@@ -699,7 +710,7 @@ enum BackgroundEvent {
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
-       ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
        /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
        /// channel to continue normal operation.
        ///
@@ -713,6 +724,7 @@ enum BackgroundEvent {
        MonitorUpdateRegeneratedOnStartup {
                counterparty_node_id: PublicKey,
                funding_txo: OutPoint,
+               channel_id: ChannelId,
                update: ChannelMonitorUpdate
        },
        /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
@@ -741,7 +753,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
        /// outbound edge.
        EmitEventAndFreeOtherChannel {
                event: events::Event,
-               downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+               downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>,
        },
        /// Indicates we should immediately resume the operation of another channel, unless there is
        /// some other reason why the channel is blocked. In practice this simply means immediately
@@ -759,6 +771,7 @@ pub(crate) enum MonitorUpdateCompletionAction {
                downstream_counterparty_node_id: PublicKey,
                downstream_funding_outpoint: OutPoint,
                blocking_action: RAAMonitorUpdateBlockingAction,
+               downstream_channel_id: ChannelId,
        },
 }
 
@@ -770,6 +783,9 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
                (0, downstream_counterparty_node_id, required),
                (2, downstream_funding_outpoint, required),
                (4, blocking_action, required),
+               // Note that by the time we get past the required read above, downstream_funding_outpoint will be
+               // filled in, so we can safely unwrap it here.
+               (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
        },
        (2, EmitEventAndFreeOtherChannel) => {
                (0, event, upgradable_required),
@@ -787,12 +803,16 @@ pub(crate) enum EventCompletionAction {
        ReleaseRAAChannelMonitorUpdate {
                counterparty_node_id: PublicKey,
                channel_funding_outpoint: OutPoint,
+               channel_id: ChannelId,
        },
 }
 impl_writeable_tlv_based_enum!(EventCompletionAction,
        (0, ReleaseRAAChannelMonitorUpdate) => {
                (0, channel_funding_outpoint, required),
                (2, counterparty_node_id, required),
+               // Note that by the time we get past the required read above, channel_funding_outpoint will be
+               // filled in, so we can safely unwrap it here.
+               (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
        };
 );
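
Since objects serialized before this change lack the new `channel_id` TLV, the `default_value` reconstructs it from the funding outpoint, which is exactly the BOLT 2 v1 derivation. A standalone sketch of what `ChannelId::v1_from_funding_outpoint` computes:

```rust
// Per BOLT 2: the v1 channel_id is the funding txid with its final two bytes
// XORed against the big-endian funding output index.
fn v1_channel_id(txid_bytes: [u8; 32], output_index: u16) -> [u8; 32] {
    let mut id = txid_bytes;
    id[30] ^= (output_index >> 8) as u8; // high byte of the index
    id[31] ^= (output_index & 0xff) as u8; // low byte of the index
    id
}
```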
 
@@ -814,7 +834,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
 impl RAAMonitorUpdateBlockingAction {
        fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
                Self::ForwardedPaymentInboundClaim {
-                       channel_id: prev_hop.outpoint.to_channel_id(),
+                       channel_id: prev_hop.channel_id,
                        htlc_id: prev_hop.htlc_id,
                }
        }
@@ -884,7 +904,9 @@ impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
                if require_disconnected && self.is_connected {
                        return false
                }
-               self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+               !self.channel_by_id.iter().any(|(_, phase)|
+                       matches!(phase, ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_))
+               )
                        && self.monitor_update_blocked_actions.is_empty()
                        && self.in_flight_monitor_updates.is_empty()
        }
@@ -956,6 +978,7 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
        Arc<DefaultRouter<
                Arc<NetworkGraph<Arc<L>>>,
                Arc<L>,
+               Arc<KeysManager>,
                Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
                ProbabilisticScoringFeeParameters,
                ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
@@ -986,6 +1009,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
                &'e DefaultRouter<
                        &'f NetworkGraph<&'g L>,
                        &'g L,
+                       &'c KeysManager,
                        &'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
                        ProbabilisticScoringFeeParameters,
                        ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
@@ -1150,7 +1174,7 @@ where
 //              |
 //              |__`peer_state`
 //                  |
-//                  |__`id_to_peer`
+//                  |__`outpoint_to_peer`
 //                  |
 //                  |__`short_to_chan_info`
 //                  |
@@ -1244,11 +1268,7 @@ where
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        outbound_scid_aliases: Mutex<HashSet<u64>>,
 
-       /// `channel_id` -> `counterparty_node_id`.
-       ///
-       /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
-       /// multiple channels with the same `temporary_channel_id` to different peers can exist,
-       /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+       /// Channel funding outpoint -> `counterparty_node_id`.
        ///
        /// Note that this map should only be used for `MonitorEvent` handling, to be able to access
        /// the corresponding channel for the event, as we only have access to the `channel_id` during
@@ -1266,7 +1286,10 @@ where
        /// required to access the channel with the `counterparty_node_id`.
        ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
-       id_to_peer: Mutex<HashMap<ChannelId, PublicKey>>,
+       #[cfg(not(test))]
+       outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
+       #[cfg(test)]
+       pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
 
        /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
        ///
@@ -1618,9 +1641,6 @@ pub struct ChannelDetails {
        pub counterparty: ChannelCounterparty,
        /// The Channel's funding transaction output, if we've negotiated the funding transaction with
        /// our counterparty already.
-       ///
-       /// Note that, if this has been set, `channel_id` will be equivalent to
-       /// `funding_txo.unwrap().to_channel_id()`.
        pub funding_txo: Option<OutPoint>,
        /// The features which this channel operates with. See individual features for more info.
        ///
@@ -1786,6 +1806,14 @@ pub struct ChannelDetails {
        ///
        /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
        pub config: Option<ChannelConfig>,
+       /// Pending inbound HTLCs.
+       ///
+       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+       pub pending_inbound_htlcs: Vec<InboundHTLCDetails>,
+       /// Pending outbound HTLCs.
+       ///
+       /// This field is empty for objects serialized with LDK versions prior to 0.0.122.
+       pub pending_outbound_htlcs: Vec<OutboundHTLCDetails>,
 }
 
 impl ChannelDetails {
@@ -1863,6 +1891,8 @@ impl ChannelDetails {
                        inbound_htlc_maximum_msat: context.get_holder_htlc_maximum_msat(),
                        config: Some(context.config()),
                        channel_shutdown_state: Some(context.shutdown_state()),
+                       pending_inbound_htlcs: context.get_pending_inbound_htlc_details(),
+                       pending_outbound_htlcs: context.get_pending_outbound_htlc_details(),
                }
        }
 }
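
The new fields let API consumers inspect in-flight HTLCs per channel straight from `list_channels`. A hedged usage sketch; the `htlc_id` and `amount_msat` field names on `InboundHTLCDetails`/`OutboundHTLCDetails` are assumptions, as the diff does not show those structs:

```rust
// Hedged sketch: iterate pending HTLCs via the new ChannelDetails fields.
for chan in channel_manager.list_channels() {
    for htlc in &chan.pending_inbound_htlcs {
        println!("channel {}: inbound HTLC {} of {} msat",
            chan.channel_id, htlc.htlc_id, htlc.amount_msat);
    }
    for htlc in &chan.pending_outbound_htlcs {
        println!("channel {}: outbound HTLC {} of {} msat",
            chan.channel_id, htlc.htlc_id, htlc.amount_msat);
    }
}
```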
@@ -1957,30 +1987,27 @@ macro_rules! handle_error {
 
                match $internal {
                        Ok(msg) => Ok(msg),
-                       Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
+                       Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
                                let mut msg_events = Vec::with_capacity(2);
 
                                if let Some((shutdown_res, update_option)) = shutdown_finish {
+                                       let counterparty_node_id = shutdown_res.counterparty_node_id;
+                                       let channel_id = shutdown_res.channel_id;
+                                       let logger = WithContext::from(
+                                               &$self.logger, Some(counterparty_node_id), Some(channel_id),
+                                       );
+                                       log_error!(logger, "Force-closing channel: {}", err.err);
+
                                        $self.finish_close_channel(shutdown_res);
                                        if let Some(update) = update_option {
                                                msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                        msg: update
                                                });
                                        }
-                                       if let Some((channel_id, user_channel_id)) = chan_id {
-                                               $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
-                                                       channel_id, user_channel_id,
-                                                       reason: ClosureReason::ProcessingError { err: err.err.clone() },
-                                                       counterparty_node_id: Some($counterparty_node_id),
-                                                       channel_capacity_sats: channel_capacity,
-                                               }, None));
-                                       }
+                               } else {
+                                       log_error!($self.logger, "Got non-closing error: {}", err.err);
                                }
 
-                               let logger = WithContext::from(
-                                       &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id)
-                               );
-                               log_error!(logger, "{}", err.err);
                                if let msgs::ErrorAction::IgnoreError = err.action {
                                } else {
                                        msg_events.push(events::MessageSendEvent::HandleError {
@@ -2006,7 +2033,9 @@ macro_rules! handle_error {
 
 macro_rules! update_maps_on_chan_removal {
        ($self: expr, $channel_context: expr) => {{
-               $self.id_to_peer.lock().unwrap().remove(&$channel_context.channel_id());
+               if let Some(outpoint) = $channel_context.get_funding_txo() {
+                       $self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
+               }
                let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
                if let Some(short_id) = $channel_context.get_short_channel_id() {
                        short_to_chan_info.remove(&short_id);
@@ -2038,12 +2067,11 @@ macro_rules! convert_chan_phase_err {
                                let logger = WithChannelContext::from(&$self.logger, &$channel.context);
                                log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
                                update_maps_on_chan_removal!($self, $channel.context);
-                               let shutdown_res = $channel.context.force_shutdown(true);
-                               let user_id = $channel.context.get_user_id();
-                               let channel_capacity_satoshis = $channel.context.get_value_satoshis();
-
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
-                                       shutdown_res, $channel_update, channel_capacity_satoshis))
+                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                               let shutdown_res = $channel.context.force_shutdown(true, reason);
+                               let err =
+                                       MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
+                               (true, err)
                        },
                }
        };
@@ -2139,6 +2167,7 @@ macro_rules! emit_channel_pending_event {
                                counterparty_node_id: $channel.context.get_counterparty_node_id(),
                                user_channel_id: $channel.context.get_user_id(),
                                funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+                               channel_type: Some($channel.context.get_channel_type().clone()),
                        }, None));
                        $channel.context.set_channel_pending_event_emitted();
                }
@@ -2419,14 +2448,14 @@ where
 
                        best_block: RwLock::new(params.best_block),
 
-                       outbound_scid_aliases: Mutex::new(HashSet::new()),
-                       pending_inbound_payments: Mutex::new(HashMap::new()),
+                       outbound_scid_aliases: Mutex::new(new_hash_set()),
+                       pending_inbound_payments: Mutex::new(new_hash_map()),
                        pending_outbound_payments: OutboundPayments::new(),
-                       forward_htlcs: Mutex::new(HashMap::new()),
-                       claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: HashMap::new(), pending_claiming_payments: HashMap::new() }),
-                       pending_intercepted_htlcs: Mutex::new(HashMap::new()),
-                       id_to_peer: Mutex::new(HashMap::new()),
-                       short_to_chan_info: FairRwLock::new(HashMap::new()),
+                       forward_htlcs: Mutex::new(new_hash_map()),
+                       claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
+                       pending_intercepted_htlcs: Mutex::new(new_hash_map()),
+                       outpoint_to_peer: Mutex::new(new_hash_map()),
+                       short_to_chan_info: FairRwLock::new(new_hash_map()),
 
                        our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
                        secp_ctx,
@@ -2438,7 +2467,7 @@ where
 
                        highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
 
-                       per_peer_state: FairRwLock::new(HashMap::new()),
+                       per_peer_state: FairRwLock::new(new_hash_map()),
 
                        pending_events: Mutex::new(VecDeque::new()),
                        pending_events_processor: AtomicBool::new(false),
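
The `HashMap::new()`/`HashSet::new()` calls are swapped for crate-local constructors (`new_hash_map`, `new_hash_set`, and `hash_map_with_capacity` further down), presumably so the concrete map type and hasher can be chosen in one place across std and no-std builds. A sketch of the assumed shape; the real definitions live elsewhere in the crate:

```rust
// Assumed shape only: centralized constructors that a no-std build could
// redirect to e.g. hashbrown equivalents.
#[cfg(feature = "std")]
pub(crate) fn new_hash_map<K, V>() -> HashMap<K, V> { HashMap::new() }
#[cfg(feature = "std")]
pub(crate) fn new_hash_set<K>() -> HashSet<K> { HashSet::new() }
#[cfg(feature = "std")]
pub(crate) fn hash_map_with_capacity<K, V>(cap: usize) -> HashMap<K, V> {
    HashMap::with_capacity(cap)
}
```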
@@ -2576,7 +2605,7 @@ where
        fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-               // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+               // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
                // of the ChannelMonitor handling. Therefore reallocations may still occur, but is
                // unlikely as the `short_to_chan_info` map often contains 2 entries for
                // the same channel.
@@ -2609,7 +2638,7 @@ where
        pub fn list_channels(&self) -> Vec<ChannelDetails> {
                // Allocate our best estimate of the number of channels we have in the `res`
                // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
-               // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
+               // a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
                // of the ChannelMonitor handling. Therefore reallocations may still occur, but is
                // unlikely as the `short_to_chan_info` map often contains 2 entries for
                // the same channel.
@@ -2700,26 +2729,6 @@ where
                        .collect()
        }
 
-       /// Helper function that issues the channel close events
-       fn issue_channel_close_events(&self, context: &ChannelContext<SP>, closure_reason: ClosureReason) {
-               let mut pending_events_lock = self.pending_events.lock().unwrap();
-               match context.unbroadcasted_funding() {
-                       Some(transaction) => {
-                               pending_events_lock.push_back((events::Event::DiscardFunding {
-                                       channel_id: context.channel_id(), transaction
-                               }, None));
-                       },
-                       None => {},
-               }
-               pending_events_lock.push_back((events::Event::ChannelClosed {
-                       channel_id: context.channel_id(),
-                       user_channel_id: context.get_user_id(),
-                       reason: closure_reason,
-                       counterparty_node_id: Some(context.get_counterparty_node_id()),
-                       channel_capacity_sats: Some(context.get_value_satoshis()),
-               }, None));
-       }
-
        fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
@@ -2761,9 +2770,8 @@ where
                                                                peer_state_lock, peer_state, per_peer_state, chan);
                                                }
                                        } else {
-                                               self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed);
                                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
-                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false));
+                                               shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed));
                                        }
                                },
                                hash_map::Entry::Vacant(_) => {
@@ -2860,14 +2868,16 @@ where
                let logger = WithContext::from(
                        &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id),
                );
-               log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+
+               log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
+                       shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
                for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
                        let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
-               if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
+               if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -2885,8 +2895,7 @@ where
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
                                                update_maps_on_chan_removal!(self, &chan.context());
-                                               self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure);
-                                               shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                               shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure));
                                        }
                                }
                                has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
@@ -2896,6 +2905,24 @@ where
                                "Closing a batch where all channels have completed initial monitor update",
                        );
                }
+
+               {
+                       let mut pending_events = self.pending_events.lock().unwrap();
+                       pending_events.push_back((events::Event::ChannelClosed {
+                               channel_id: shutdown_res.channel_id,
+                               user_channel_id: shutdown_res.user_channel_id,
+                               reason: shutdown_res.closure_reason,
+                               counterparty_node_id: Some(shutdown_res.counterparty_node_id),
+                               channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
+                               channel_funding_txo: shutdown_res.channel_funding_txo,
+                       }, None));
+
+                       if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
+                               pending_events.push_back((events::Event::DiscardFunding {
+                                       channel_id: shutdown_res.channel_id, transaction
+                               }, None));
+                       }
+               }
                for shutdown_result in shutdown_results.drain(..) {
                        self.finish_close_channel(shutdown_result);
                }
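
With this hunk, `ChannelClosed` (now carrying the new `channel_funding_txo` field) and `DiscardFunding` are emitted centrally from `finish_close_channel` instead of via the removed `issue_channel_close_events` helper. A hedged consumer-side sketch of handling these events:

```rust
// Hedged sketch of consuming the events emitted above.
match event {
    Event::ChannelClosed { channel_id, reason, channel_funding_txo, .. } => {
        // `channel_funding_txo` is the field added alongside this patch.
        println!("channel {} closed: {} (funding: {:?})", channel_id, reason, channel_funding_txo);
    },
    Event::DiscardFunding { channel_id, transaction } => {
        // The funding transaction was never broadcast and can be forgotten.
        println!("discarding funding tx {} for {}", transaction.txid(), channel_id);
    },
    _ => {},
}
```
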
@@ -2918,17 +2945,16 @@ where
                        let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id));
                        if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
                                log_error!(logger, "Force-closing channel {}", channel_id);
-                               self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
                                mem::drop(peer_state);
                                mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
-                                               self.finish_close_channel(chan.context.force_shutdown(broadcast));
+                                               self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason));
                                                (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
                                        },
                                        ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
-                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false));
+                                               self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason));
                                                // Unfunded channel has no update
                                                (None, chan_phase.context().get_counterparty_node_id())
                                        },
@@ -2995,8 +3021,8 @@ where
        /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
        /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
        ///
-       /// You can always get the latest local transaction(s) to broadcast from
-       /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+       /// You can always broadcast the latest local transaction(s) via
+       /// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
        pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey)
        -> Result<(), APIError> {
                self.force_close_sending_error(channel_id, counterparty_node_id, false)
@@ -3029,8 +3055,9 @@ where
 
                let is_intro_node_forward = match next_hop {
                        onion_utils::Hop::Forward {
-                               // TODO: update this when we support blinded forwarding as non-intro node
-                               next_hop_data: msgs::InboundOnionPayload::BlindedForward { .. }, ..
+                               next_hop_data: msgs::InboundOnionPayload::BlindedForward {
+                                       intro_node_blinding_point: Some(_), ..
+                               }, ..
                        } => true,
                        _ => false,
                };
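
Only the introduction node receives the blinding point inside the onion payload itself (inner hops receive it via `update_add_htlc.blinding_point`), so checking `intro_node_blinding_point: Some(_)` correctly identifies the intro node now that non-intro blinded forwarding is supported. Schematically, this check feeds the `failure` field added to `BlindedForward` above:

```rust
// Sketch, not the literal patch code: the recorded failure mode follows
// directly from the intro-node check.
let failure = if is_intro_node_forward {
    BlindedFailure::FromIntroductionNode
} else {
    BlindedFailure::FromBlindedNode
};
```
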
@@ -3239,14 +3266,14 @@ where
                                                // delay) once they've sent us a commitment_signed!
                                                PendingHTLCStatus::Forward(info)
                                        },
-                                       Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+                                       Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
                                }
                        },
                        onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
                                match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
                                        new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
                                        Ok(info) => PendingHTLCStatus::Forward(info),
-                                       Err(InboundOnionErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
+                                       Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
                                }
                        }
                }
@@ -3674,7 +3701,7 @@ where
                                ProbeSendFailure::RouteNotFound
                        })?;
 
-               let mut used_liquidity_map = HashMap::with_capacity(first_hops.len());
+               let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
 
                let mut res = Vec::new();
 
@@ -3748,18 +3775,18 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+               let funding_txo;
+               let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) {
                        Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
-                               let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+                               funding_txo = find_funding_output(&chan, &funding_transaction)?;
 
                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger)
                                        .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e {
                                                let channel_id = chan.context.channel_id();
-                                               let user_id = chan.context.get_user_id();
-                                               let shutdown_res = chan.context.force_shutdown(false);
-                                               let channel_capacity = chan.context.get_value_satoshis();
-                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
+                                               let reason = ClosureReason::ProcessingError { err: msg.clone() };
+                                               let shutdown_res = chan.context.force_shutdown(false, reason);
+                                               (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None))
                                        } else { unreachable!(); });
                                match funding_res {
                                        Ok(funding_msg) => (chan, funding_msg),
@@ -3798,9 +3825,21 @@ where
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
-                               if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
-                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                               let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                               match outpoint_to_peer.entry(funding_txo) {
+                                       hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
+                                       hash_map::Entry::Occupied(o) => {
+                                               let err = format!(
+                                                       "An existing channel using outpoint {} is open with peer {}",
+                                                       funding_txo, o.get()
+                                               );
+                                               mem::drop(outpoint_to_peer);
+                                               mem::drop(peer_state_lock);
+                                               mem::drop(per_peer_state);
+                                               let reason = ClosureReason::ProcessingError { err: err.clone() };
+                                               self.finish_close_channel(chan.context.force_shutdown(true, reason));
+                                               return Err(APIError::ChannelUnavailable { err });
+                                       }
                                }
                                e.insert(ChannelPhase::UnfundedOutboundV1(chan));
                        }
@@ -3937,7 +3976,10 @@ where
                                        }
                                        let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
                                        if let Some(funding_batch_state) = funding_batch_state.as_mut() {
-                                               funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false));
+                                               // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
+                                               // need to fix this somehow to not rely on using the outpoint for the channel ID if we
+                                               // want to support V2 batching here as well.
+                                               funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
                                        }
                                        Ok(outpoint)
                                })
@@ -3964,11 +4006,12 @@ where
                                                .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id))
                                                .map(|mut chan| {
                                                        update_maps_on_chan_removal!(self, &chan.context());
-                                                       self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() });
-                                                       shutdown_results.push(chan.context_mut().force_shutdown(false));
+                                                       let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
+                                                       shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason));
                                                });
                                }
                        }
+                       mem::drop(funding_batch_states);
                        for shutdown_result in shutdown_results.drain(..) {
                                self.finish_close_channel(shutdown_result);
                        }
@@ -4161,6 +4204,7 @@ where
                let mut per_source_pending_forward = [(
                        payment.prev_short_channel_id,
                        payment.prev_funding_outpoint,
+                       payment.prev_channel_id,
                        payment.prev_user_channel_id,
                        vec![(pending_htlc_info, payment.prev_htlc_id)]
                )];
@@ -4188,6 +4232,7 @@ where
                                short_channel_id: payment.prev_short_channel_id,
                                user_channel_id: Some(payment.prev_user_channel_id),
                                outpoint: payment.prev_funding_outpoint,
+                               channel_id: payment.prev_channel_id,
                                htlc_id: payment.prev_htlc_id,
                                incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
                                phantom_shared_secret: None,
@@ -4211,9 +4256,9 @@ where
 
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
-               let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
+               let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                {
-                       let mut forward_htlcs = HashMap::new();
+                       let mut forward_htlcs = new_hash_map();
                        mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
 
                        for (short_chan_id, mut pending_forwards) in forward_htlcs {
@@ -4224,20 +4269,21 @@ where
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                               forward_info: PendingHTLCInfo {
+                                                                               prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                                               prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                        routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
                                                                                        outgoing_cltv_value, ..
                                                                                }
                                                                        }) => {
                                                                                macro_rules! failure_handler {
                                                                                        ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
-                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id()));
+                                                                                               let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id));
                                                                                                log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
 
                                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: prev_short_channel_id,
                                                                                                        user_channel_id: Some(prev_user_channel_id),
+                                                                                                       channel_id: prev_channel_id,
                                                                                                        outpoint: prev_funding_outpoint,
                                                                                                        htlc_id: prev_htlc_id,
                                                                                                        incoming_packet_shared_secret: incoming_shared_secret,
@@ -4301,8 +4347,8 @@ where
                                                                                                                        outgoing_cltv_value, Some(phantom_shared_secret), false, None,
                                                                                                                        current_height, self.default_configuration.accept_mpp_keysend)
                                                                                                                {
-                                                                                                                       Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
-                                                                                                                       Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                       Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])),
+                                                                                                                       Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                }
                                                                                                        },
                                                                                                        _ => panic!(),
@@ -4344,10 +4390,10 @@ where
                                        if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
                                                let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                for forward_info in pending_forwards.drain(..) {
-                                                       match forward_info {
+                                                       let queue_fail_htlc_res = match forward_info {
                                                                HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                       prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                                       forward_info: PendingHTLCInfo {
+                                                                       prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                                       prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                                incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
                                                                                routing: PendingHTLCRouting::Forward {
                                                                                        onion_packet, blinded, ..
@@ -4358,12 +4404,13 @@ where
                                                                        let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
+                                                                               channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
                                                                                // Phantom payments are only PendingHTLCRouting::Receive.
                                                                                phantom_shared_secret: None,
-                                                                               blinded_failure: blinded.map(|_| BlindedFailure::FromIntroductionNode),
+                                                                               blinded_failure: blinded.map(|b| b.failure),
                                                                        });
                                                                        let next_blinding_point = blinded.and_then(|b| {
                                                                                let encrypted_tlvs_ss = self.node_signer.ecdh(
@@ -4390,40 +4437,35 @@ where
                                                                                ));
                                                                                continue;
                                                                        }
+                                                                       None
                                                                },
                                                                HTLCForwardInfo::AddHTLC { .. } => {
                                                                        panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
                                                                },
                                                                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
                                                                        log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_htlc(
-                                                                               htlc_id, err_packet, &&logger
-                                                                       ) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
                                                                },
                                                                HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-                                                                       log_trace!(self.logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
-                                                                       if let Err(e) = chan.queue_fail_malformed_htlc(htlc_id, failure_code, sha256_of_onion, &self.logger) {
-                                                                               if let ChannelError::Ignore(msg) = e {
-                                                                                       log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
-                                                                               } else {
-                                                                                       panic!("Stated return value requirements in queue_fail_malformed_htlc() were not met");
-                                                                               }
-                                                                               // fail-backs are best-effort, we probably already have one
-                                                                               // pending, and if not that's OK, if not, the channel is on
-                                                                               // the chain and sending the HTLC-Timeout is their problem.
-                                                                               continue;
-                                                                       }
+                                                                       log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+                                                                       let res = chan.queue_fail_malformed_htlc(
+                                                                               htlc_id, failure_code, sha256_of_onion, &&logger
+                                                                       );
+                                                                       Some((res, htlc_id))
                                                                },
+                                                       };
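+                                                       // Fail-back queueing results from both arms above share the
+                                                       // best-effort error handling below.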
+                                                       if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+                                                               if let Err(e) = queue_fail_htlc_res {
+                                                                       if let ChannelError::Ignore(msg) = e {
+                                                                               log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+                                                                       } else {
+                                                                               panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
+                                                                       }
+                                                                       // Fail-backs are best-effort; we probably already have one
+                                                                       // pending, and if not that's OK: the channel is on chain
+                                                                       // and sending the HTLC-Timeout is the counterparty's problem.
+                                                                       continue;
+                                                               }
                                                        }
                                                }
                                        } else {
@@ -4434,8 +4476,8 @@ where
                                        'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
                                                match forward_info {
                                                        HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                               prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
-                                                               forward_info: PendingHTLCInfo {
+                                                               prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+                                                               prev_user_channel_id, forward_info: PendingHTLCInfo {
                                                                        routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
                                                                        skimmed_fee_msat, ..
                                                                }
@@ -4469,6 +4511,7 @@ where
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                user_channel_id: Some(prev_user_channel_id),
+                                                                               channel_id: prev_channel_id,
                                                                                outpoint: prev_funding_outpoint,
                                                                                htlc_id: prev_htlc_id,
                                                                                incoming_packet_shared_secret: incoming_shared_secret,
@@ -4500,6 +4543,7 @@ where
                                                                                failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                short_channel_id: $htlc.prev_hop.short_channel_id,
                                                                                                user_channel_id: $htlc.prev_hop.user_channel_id,
+                                                                                               channel_id: prev_channel_id,
                                                                                                outpoint: prev_funding_outpoint,
                                                                                                htlc_id: $htlc.prev_hop.htlc_id,
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
@@ -4580,7 +4624,6 @@ where
                                                                                        #[allow(unused_assignments)] {
                                                                                                committed_to_claimable = true;
                                                                                        }
-                                                                                       let prev_channel_id = prev_funding_outpoint.to_channel_id();
                                                                                        htlcs.push(claimable_htlc);
                                                                                        let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
                                                                                        htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
@@ -4724,19 +4767,19 @@ where
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                               BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor update completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                },
-                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
                                        let mut updated_chan = false;
                                        {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
-                                                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+                                                       match peer_state.channel_by_id.entry(channel_id) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
                                                                        if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
                                                                                updated_chan = true;
@@ -4887,8 +4930,7 @@ where
                                        log_error!(logger,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
-                                       shutdown_channels.push(context.force_shutdown(false));
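+                                       // The closure reason is carried in the `ShutdownResult` returned
+                                       // by `force_shutdown` and handled when that result is processed.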
+                                       shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
@@ -5276,10 +5318,10 @@ where
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
-                               ref phantom_shared_secret, ref outpoint, ref blinded_failure, ..
+                               ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
                        }) => {
                                log_trace!(
-                                       WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())),
+                                       WithContext::from(&self.logger, None, Some(*channel_id)),
                                        "Failing {}HTLC with payment_hash {} backwards from us: {:?}",
                                        if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
                                );
@@ -5323,7 +5365,7 @@ where
                                if push_forward_ev { self.push_pending_forwards_ev(); }
                                let mut pending_events = self.pending_events.lock().unwrap();
                                pending_events.push_back((events::Event::HTLCHandlingFailed {
-                                       prev_channel_id: outpoint.to_channel_id(),
+                                       prev_channel_id: *channel_id,
                                        failed_next_destination: destination,
                                }, None));
                        },
@@ -5464,7 +5506,7 @@ where
                }
                if valid_mpp {
                        for htlc in sources.drain(..) {
-                               let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id();
+                               let prev_hop_chan_id = htlc.prev_hop.channel_id;
                                if let Err((pk, err)) = self.claim_funds_from_hop(
                                        htlc.prev_hop, payment_preimage,
                                        |_, definitely_duplicate| {
@@ -5517,7 +5559,7 @@ where
 
                {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       let chan_id = prev_hop.outpoint.to_channel_id();
+                       let chan_id = prev_hop.channel_id;
                        let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
                                Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
                                None => None
@@ -5555,6 +5597,7 @@ where
                                                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                                                        counterparty_node_id,
                                                                                        funding_txo: prev_hop.outpoint,
+                                                                                       channel_id: prev_hop.channel_id,
                                                                                        update: monitor_update.clone(),
                                                                                });
                                                                }
@@ -5569,13 +5612,13 @@ where
 
                                                                log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
                                                                        chan_id, action);
-                                                               let (node_id, funding_outpoint, blocker) =
+                                                               let (node_id, _funding_outpoint, channel_id, blocker) =
                                                                if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: node_id,
                                                                        downstream_funding_outpoint: funding_outpoint,
-                                                                       blocking_action: blocker,
+                                                                       blocking_action: blocker, downstream_channel_id: channel_id,
                                                                } = action {
-                                                                       (node_id, funding_outpoint, blocker)
+                                                                       (node_id, funding_outpoint, channel_id, blocker)
                                                                } else {
                                                                        debug_assert!(false,
                                                                                "Duplicate claims should always free another channel immediately");
@@ -5585,7 +5628,7 @@ where
                                                                        let mut peer_state = peer_state_mtx.lock().unwrap();
                                                                        if let Some(blockers) = peer_state
                                                                                .actions_blocking_raa_monitor_updates
-                                                                               .get_mut(&funding_outpoint.to_channel_id())
+                                                                               .get_mut(&channel_id)
                                                                        {
                                                                                let mut found_blocker = false;
                                                                                blockers.retain(|iter| {
@@ -5610,9 +5653,11 @@ where
                }
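+               // This closed-channel preimage update names the channel explicitly via
+               // `channel_id`; no counterparty is attached on this path.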
                let preimage_update = ChannelMonitorUpdate {
                        update_id: CLOSED_CHANNEL_UPDATE_ID,
+                       counterparty_node_id: None,
                        updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                                payment_preimage,
                        }],
+                       channel_id: Some(prev_hop.channel_id),
                };
 
                if !during_init {
@@ -5624,7 +5669,8 @@ where
                                // with a preimage we *must* somehow manage to propagate it to the upstream
                                // channel, or we must be able to receive the same event and try
                                // again on restart.
-                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+                               log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)),
+                                       "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
                                        payment_preimage, update_res);
                        }
                } else {
@@ -5640,7 +5686,7 @@ where
                        // complete the monitor update completion action from `completion_action`.
                        self.pending_background_events.lock().unwrap().push(
                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((
-                                       prev_hop.outpoint, preimage_update,
+                                       prev_hop.outpoint, prev_hop.channel_id, preimage_update,
                                )));
                }
                // Note that we do process the completion action here. This totally could be a
@@ -5657,8 +5703,9 @@ where
        }
 
        fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
-               forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
-               next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+               forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+               startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+               next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
        ) {
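+               // `skimmed_fee_msat` is any fee taken from the forwarded amount and must
+               // already be counted in the total fee; together with `next_channel_id` it
+               // is surfaced via `Event::PaymentForwarded` below.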
                match source {
                        HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
@@ -5668,7 +5715,7 @@ where
                                        debug_assert_eq!(pubkey, path.hops[0].pubkey);
                                }
                                let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                                       channel_funding_outpoint: next_channel_outpoint,
+                                       channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
                                        counterparty_node_id: path.hops[0].pubkey,
                                };
                                self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
@@ -5676,15 +5723,17 @@ where
                                        &self.logger);
                        },
                        HTLCSource::PreviousHopData(hop_data) => {
-                               let prev_outpoint = hop_data.outpoint;
+                               let prev_channel_id = hop_data.channel_id;
                                let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
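+                               // Copies used only by the sanity checks in the debug assertions
+                               // further below.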
                                #[cfg(debug_assertions)]
                                let claiming_chan_funding_outpoint = hop_data.outpoint;
+                               #[cfg(debug_assertions)]
+                               let claiming_channel_id = hop_data.channel_id;
                                let res = self.claim_funds_from_hop(hop_data, payment_preimage,
                                        |htlc_claim_value_msat, definitely_duplicate| {
                                                let chan_to_release =
                                                        if let Some(node_id) = next_channel_counterparty_node_id {
-                                                               Some((node_id, next_channel_outpoint, completed_blocker))
+                                                               Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker))
                                                        } else {
                                                                // We can only get `None` here if we are processing a
                                                                // `ChannelMonitor`-originated event, in which case we
@@ -5721,7 +5770,7 @@ where
                                                                                },
                                                                                // or the channel we'd unblock is already closed,
                                                                                BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(
-                                                                                       (funding_txo, monitor_update)
+                                                                                       (funding_txo, _channel_id, monitor_update)
                                                                                ) => {
                                                                                        if *funding_txo == next_channel_outpoint {
                                                                                                assert_eq!(monitor_update.updates.len(), 1);
@@ -5737,7 +5786,7 @@ where
                                                                                BackgroundEvent::MonitorUpdatesComplete {
                                                                                        channel_id, ..
                                                                                } =>
-                                                                                       *channel_id == claiming_chan_funding_outpoint.to_channel_id(),
+                                                                                       *channel_id == claiming_channel_id,
                                                                        }
                                                                }), "{:?}", *background_events);
                                                        }
@@ -5747,22 +5796,26 @@ where
                                                                Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
                                                                        downstream_counterparty_node_id: other_chan.0,
                                                                        downstream_funding_outpoint: other_chan.1,
-                                                                       blocking_action: other_chan.2,
+                                                                       downstream_channel_id: other_chan.2,
+                                                                       blocking_action: other_chan.3,
                                                                })
                                                        } else { None }
                                                } else {
-                                                       let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+                                                       let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
                                                                if let Some(claimed_htlc_value) = htlc_claim_value_msat {
                                                                        Some(claimed_htlc_value - forwarded_htlc_value)
                                                                } else { None }
                                                        } else { None };
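+                                                       // `Option`'s derived ordering puts `None` below any `Some`,
+                                                       // so a known skimmed fee requires a known (and at least as
+                                                       // large) total fee.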
+                                                       debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+                                                               "skimmed_fee_msat must always be included in total_fee_earned_msat");
                                                        Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                                event: events::Event::PaymentForwarded {
-                                                                       fee_earned_msat,
+                                                                       total_fee_earned_msat,
                                                                        claim_from_onchain_tx: from_onchain,
-                                                                       prev_channel_id: Some(prev_outpoint.to_channel_id()),
-                                                                       next_channel_id: Some(next_channel_outpoint.to_channel_id()),
+                                                                       prev_channel_id: Some(prev_channel_id),
+                                                                       next_channel_id: Some(next_channel_id),
                                                                        outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+                                                                       skimmed_fee_msat,
                                                                },
                                                                downstream_counterparty_and_funding_outpoint: chan_to_release,
                                                        })
@@ -5811,16 +5864,17 @@ where
                                        event, downstream_counterparty_and_funding_outpoint
                                } => {
                                        self.pending_events.lock().unwrap().push_back((event, None));
-                                       if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
-                                               self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+                                       if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint {
+                                               self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
                                        }
                                },
                                MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
-                                       downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action,
+                                       downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
                                } => {
                                        self.handle_monitor_update_release(
                                                downstream_counterparty_node_id,
                                                downstream_funding_outpoint,
+                                               downstream_channel_id,
                                                Some(blocking_action),
                                        );
                                },
@@ -5835,7 +5889,7 @@ where
                commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
                pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
                channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-       -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+       -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> {
                let logger = WithChannelContext::from(&self.logger, &channel.context);
                log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
                        &channel.context.channel_id(),
@@ -5850,7 +5904,7 @@ where
                let counterparty_node_id = channel.context.get_counterparty_node_id();
                if !pending_forwards.is_empty() {
                        htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
-                               channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
+                               channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards));
                }
 
                if let Some(msg) = channel_ready {
@@ -5904,16 +5958,16 @@ where
                htlc_forwards
        }
 
-       fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+       fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
                debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
 
                let counterparty_node_id = match counterparty_node_id {
                        Some(cp_id) => cp_id.clone(),
                        None => {
                                // TODO: Once we can rely on the counterparty_node_id from the
-                               // monitor event, this and the id_to_peer map should be removed.
-                               let id_to_peer = self.id_to_peer.lock().unwrap();
-                               match id_to_peer.get(&funding_txo.to_channel_id()) {
+                               // monitor event, this and the outpoint_to_peer map should be removed.
+                               let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                               match outpoint_to_peer.get(funding_txo) {
                                        Some(cp_id) => cp_id.clone(),
                                        None => return,
                                }
@@ -5926,11 +5980,11 @@ where
                peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let channel =
-                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+                       if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
                                chan
                        } else {
                                let update_actions = peer_state.monitor_update_blocked_actions
-                                       .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new());
+                                       .remove(&channel_id).unwrap_or(Vec::new());
                                mem::drop(peer_state_lock);
                                mem::drop(per_peer_state);
                                self.handle_monitor_update_completion_actions(update_actions);
@@ -5994,13 +6048,20 @@ where
        }
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
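+               // Used below to log each rejection reason with peer and channel context.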
+               let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id));
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels =
                        self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+                       .ok_or_else(|| {
+                               let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
+                               log_error!(logger, "{}", err_str);
+
+                               APIError::ChannelUnavailable { err: err_str }
+                       })?;
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
                let is_only_peer_channel = peer_state.total_channel_count() == 1;
@@ -6015,9 +6076,19 @@ where
                                InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
                                        counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
                                        &unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
-                                       &self.logger, accept_0conf).map_err(|e| APIError::ChannelUnavailable { err: e.to_string() })
+                                       &self.logger, accept_0conf).map_err(|e| {
+                                               let err_str = e.to_string();
+                                               log_error!(logger, "{}", err_str);
+
+                                               APIError::ChannelUnavailable { err: err_str }
+                                       })
+                               }
+                       _ => {
+                               let err_str = "No such channel awaiting to be accepted.".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               Err(APIError::APIMisuseError { err: err_str })
                        }
-                       _ => Err(APIError::APIMisuseError { err: "No such channel awaiting to be accepted.".to_owned() })
                }?;
 
                if accept_0conf {
@@ -6031,7 +6102,10 @@ where
                                }
                        };
                        peer_state.pending_msg_events.push(send_msg_err_event);
-                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                       let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+                       log_error!(logger, "{}", err_str);
+
+                       return Err(APIError::APIMisuseError { err: err_str });
                } else {
                        // If this peer already has some channels, a new channel won't increase our number of peers
                        // with unfunded channels, so as long as we aren't over the maximum number of unfunded
@@ -6044,7 +6118,10 @@ where
                                        }
                                };
                                peer_state.pending_msg_events.push(send_msg_err_event);
-                               return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
+                               let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+                               log_error!(logger, "{}", err_str);
+
+                               return Err(APIError::APIMisuseError { err: err_str });
                        }
                }
 
@@ -6167,13 +6244,18 @@ where
 
                // If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
                if self.default_configuration.manually_accept_inbound_channels {
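+                       // Derive the effective channel type from the message and our
+                       // supported features; `msg.channel_type` is optional and may be
+                       // absent.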
+                       let channel_type = channel::channel_type_from_open_channel(
+                                       &msg, &peer_state.latest_features, &self.channel_type_features()
+                               ).map_err(|e|
+                                       MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id)
+                               )?;
                        let mut pending_events = self.pending_events.lock().unwrap();
                        pending_events.push_back((events::Event::OpenChannelRequest {
                                temporary_channel_id: msg.temporary_channel_id.clone(),
                                counterparty_node_id: counterparty_node_id.clone(),
                                funding_satoshis: msg.funding_satoshis,
                                push_msat: msg.push_msat,
-                               channel_type: msg.channel_type.clone().unwrap(),
+                               channel_type,
                        }, None));
                        peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
                                open_channel_msg: msg.clone(),
@@ -6265,50 +6347,61 @@ where
 
                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                let peer_state = &mut *peer_state_lock;
-               let (chan, funding_msg_opt, monitor) =
+               let (mut chan, funding_msg_opt, monitor) =
                        match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
                                Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
                                        let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
                                        match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
                                                Ok(res) => res,
-                                               Err((mut inbound_chan, err)) => {
+                                               Err((inbound_chan, err)) => {
                                                        // We've already removed this inbound channel from the map in `PeerState`
                                                        // above, so at this point we just need to clean up any lingering
                                                        // entries concerning this channel, as it is safe to do so.
-                                                       update_maps_on_chan_removal!(self, &inbound_chan.context);
-                                                       let user_id = inbound_chan.context.get_user_id();
-                                                       let shutdown_res = inbound_chan.context.force_shutdown(false);
-                                                       return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
-                                                               msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
+                                                       debug_assert!(matches!(err, ChannelError::Close(_)));
+                                                       // Really we should be returning the channel_id the peer expects based
+                                                       // on their funding info here, but they're horribly confused anyway, so
+                                                       // there's not a lot we can do to save them.
+                                                       return Err(convert_chan_phase_err!(self, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
                                                },
                                        }
                                },
-                               Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+                               Some(mut phase) => {
+                                       let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
+                                       let err = ChannelError::Close(err_msg);
+                                       return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
                        };
 
-               match peer_state.channel_by_id.entry(chan.context.channel_id()) {
+               let funded_channel_id = chan.context.channel_id();
+
+               macro_rules! fail_chan { ($err: expr) => { {
+                       // Note that at this point we've filled in the funding outpoint on our
+                       // channel, but it's actually in conflict with another channel. If we
+                       // called `convert_chan_phase_err` immediately (which in turn calls
+                       // `update_maps_on_chan_removal`), we'd remove the existing channel's
+                       // entry from `outpoint_to_peer`. Thus, we must first unset the funding
+                       // outpoint on the channel.
+                       let err = ChannelError::Close($err.to_owned());
+                       chan.unset_funding_info(msg.temporary_channel_id);
+                       return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
+               } } }
+
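+               // From here on, failures go through `fail_chan` so the funding outpoint is
+               // unset before the channel is torn down.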
+               match peer_state.channel_by_id.entry(funded_channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                       "Already had channel with the new channel_id".to_owned(),
-                                       chan.context.channel_id()
-                               ))
+                               fail_chan!("Already had channel with the new channel_id");
                        },
                        hash_map::Entry::Vacant(e) => {
-                               let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
-                               match id_to_peer_lock.entry(chan.context.channel_id()) {
+                               let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
+                               match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
                                        hash_map::Entry::Occupied(_) => {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                                       "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-                                                       chan.context.channel_id()))
+                                               fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
                                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
                                                if let Ok(persist_state) = monitor_res {
                                                        i_e.insert(chan.context.get_counterparty_node_id());
-                                                       mem::drop(id_to_peer_lock);
+                                                       mem::drop(outpoint_to_peer_lock);
 
                                                        // There's no problem signing a counterparty's funding transaction if our monitor
                                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
@@ -6331,13 +6424,7 @@ where
                                                } else {
                                                        let logger = WithChannelContext::from(&self.logger, &chan.context);
                                                        log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
-                                                       let channel_id = match funding_msg_opt {
-                                                               Some(msg) => msg.channel_id,
-                                                               None => chan.context.channel_id(),
-                                                       };
-                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
-                                                               "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
-                                                               channel_id));
+                                                       fail_chan!("Duplicate funding outpoint");
                                                }
                                        }
                                }
@@ -6368,7 +6455,7 @@ where
                                        let res =
                                                chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
                                        match res {
-                                               Ok((chan, monitor)) => {
+                                               Ok((mut chan, monitor)) => {
                                                        if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
                                                                // We really should be able to insert here without doing a second
                                                                // lookup, but sadly rust stdlib doesn't currently allow keeping
@@ -6380,6 +6467,11 @@ where
                                                                Ok(())
                                                        } else {
                                                                let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned());
+                                                               // We weren't able to watch the channel to begin with, so no
+                                                               // updates should be made on it. Previously, full_stack_target
+                                                               // found an (unreachable) panic when the monitor update contained
+                                                               // within `shutdown_finish` was applied.
+                                                               chan.unset_funding_info(msg.channel_id);
                                                                return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
                                                        }
                                                },
@@ -6502,9 +6594,8 @@ where
                                                let context = phase.context_mut();
                                                let logger = WithChannelContext::from(&self.logger, context);
                                                log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-                                               self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
-                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false));
+                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel));
                                        },
                                }
                        } else {
@@ -6573,7 +6664,6 @@ where
                                        msg: update
                                });
                        }
-                       self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
                }
                mem::drop(per_peer_state);
                if let Some(shutdown_result) = shutdown_result {
@@ -6665,7 +6755,7 @@ where
 
        fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
                let funding_txo;
-               let (htlc_source, forwarded_htlc_value) = {
+               let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -6703,7 +6793,11 @@ where
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
                };
-               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo);
+               self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+                       Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+                       funding_txo, msg.channel_id
+               );
                Ok(())
        }
 
@@ -6791,8 +6885,8 @@ where
        }
 
        #[inline]
-       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
-               for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+       fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
+               for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut push_forward_event = false;
                        let mut new_intercept_events = VecDeque::new();
                        let mut failed_intercept_forwards = Vec::new();
@@ -6811,7 +6905,7 @@ where
                                        match forward_htlcs.entry(scid) {
                                                hash_map::Entry::Occupied(mut entry) => {
                                                        entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                               prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
+                                                               prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
                                                        if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
@@ -6829,15 +6923,16 @@ where
                                                                                        intercept_id
                                                                                }, None));
                                                                                entry.insert(PendingAddHTLCInfo {
-                                                                                       prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
+                                                                                       prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info });
                                                                        },
                                                                        hash_map::Entry::Occupied(_) => {
-                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id()));
+                                                                               let logger = WithContext::from(&self.logger, None, Some(prev_channel_id));
                                                                                log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
                                                                                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                        short_channel_id: prev_short_channel_id,
                                                                                        user_channel_id: Some(prev_user_channel_id),
                                                                                        outpoint: prev_funding_outpoint,
+                                                                                       channel_id: prev_channel_id,
                                                                                        htlc_id: prev_htlc_id,
                                                                                        incoming_packet_shared_secret: forward_info.incoming_shared_secret,
                                                                                        phantom_shared_secret: None,
@@ -6857,7 +6952,7 @@ where
                                                                        push_forward_event = true;
                                                                }
                                                                entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-                                                                       prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
+                                                                       prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })));
                                                        }
                                                }
                                        }
@@ -6901,13 +6996,14 @@ where
        /// the [`ChannelMonitorUpdate`] in question.
        fn raa_monitor_updates_held(&self,
                actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
-               channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+               channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
        ) -> bool {
                actions_blocking_raa_monitor_updates
-                       .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+                       .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
                || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
                        action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                channel_funding_outpoint,
+                               channel_id,
                                counterparty_node_id,
                        })
                })
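
The held-check above consults two sources: the per-channel blocker map and any queued event-completion actions. A minimal standalone sketch of that predicate, using simplified stand-in types rather than the real rust-lightning definitions:

    use std::collections::BTreeMap;

    type ChannelId = [u8; 32]; // stand-in for lightning::ln::ChannelId

    #[derive(PartialEq)]
    enum EventCompletionAction {
        ReleaseRAAChannelMonitorUpdate { channel_id: ChannelId },
    }

    fn raa_monitor_updates_held(
        blockers: &BTreeMap<ChannelId, Vec<u8>>,   // per-channel blocking actions
        pending: &[Option<EventCompletionAction>], // queued event-completion actions
        channel_id: ChannelId,
    ) -> bool {
        // Held if any blocker is registered for this channel...
        blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
            // ...or if a queued event will release an RAA monitor update for it.
            || pending.iter().any(|a| {
                a == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_id })
            })
    }
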
@@ -6924,7 +7020,7 @@ where
 
                        if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
                                return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
-                                       chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+                                       chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
                        }
                }
                false
@@ -6946,7 +7042,7 @@ where
                                                let funding_txo_opt = chan.context.get_funding_txo();
                                                let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
                                                        self.raa_monitor_updates_held(
-                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+                                                               &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
                                                                *counterparty_node_id)
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
@@ -7192,29 +7288,31 @@ where
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
-                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id()));
+                                               let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
                                                if let Some(preimage) = htlc_update.payment_preimage {
                                                        log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
-                                                       self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint);
+                                                       self.claim_funds_internal(htlc_update.source, preimage,
+                                                               htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+                                                               false, counterparty_node_id, funding_outpoint, channel_id);
                                                } else {
                                                        log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
                                                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
-                                       MonitorEvent::HolderForceClosed(funding_outpoint) => {
+                                       MonitorEvent::HolderForceClosed(_funding_outpoint) => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                // TODO: Once we can rely on the counterparty_node_id from the
-                                                               // monitor event, this and the id_to_peer map should be removed.
-                                                               let id_to_peer = self.id_to_peer.lock().unwrap();
-                                                               id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
+                                                               // monitor event, this and the outpoint_to_peer map should be removed.
+                                                               let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
+                                                               outpoint_to_peer.get(&funding_outpoint).cloned()
                                                        }
                                                };
                                                if let Some(counterparty_node_id) = counterparty_node_id_opt {
@@ -7223,15 +7321,14 @@ where
                                                                let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                                let peer_state = &mut *peer_state_lock;
                                                                let pending_msg_events = &mut peer_state.pending_msg_events;
-                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+                                                               if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
                                                                        if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                                               failed_channels.push(chan.context.force_shutdown(false));
+                                                                               failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
                                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                                msg: update
                                                                                        });
                                                                                }
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@ -7243,8 +7340,8 @@ where
                                                        }
                                                }
                                        },
-                                       MonitorEvent::Completed { funding_txo, monitor_update_id } => {
-                                               self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
+                                       MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
+                                               self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
                                        },
                                }
                        }
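
The counterparty lookup in the `HolderForceClosed` arm prefers the node id delivered with the monitor event and only falls back to the renamed `outpoint_to_peer` map. A standalone sketch of that fallback, with stand-in types:

    use std::collections::HashMap;

    type OutPoint = ([u8; 32], u16); // stand-in: (funding txid, output index)
    type PublicKey = [u8; 33];       // stand-in for a node id

    fn counterparty_for_monitor_event(
        event_node_id: Option<PublicKey>,
        funding_outpoint: OutPoint,
        outpoint_to_peer: &HashMap<OutPoint, PublicKey>,
    ) -> Option<PublicKey> {
        match event_node_id {
            Some(cp_id) => Some(cp_id),
            // Per the TODO above: once events always carry the id, this
            // fallback (and the map itself) can be removed.
            None => outpoint_to_peer.get(&funding_outpoint).cloned(),
        }
    }
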
@@ -7418,8 +7515,6 @@ where
                                                                                        });
                                                                                }
 
-                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-
                                                                                log_info!(logger, "Broadcasting {}", log_tx!(tx));
                                                                                self.tx_broadcaster.broadcast_transactions(&[&tx]);
                                                                                update_maps_on_chan_removal!(self, &chan.context);
@@ -7463,14 +7558,14 @@ where
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
-                       if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
+                       if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
                                self.pending_background_events.lock().unwrap().push(
                                        BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                               counterparty_node_id, funding_txo, update
+                                               counterparty_node_id, funding_txo, update, channel_id,
                                        });
                        }
                        self.finish_close_channel(failure);
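
The assertions above encode an invariant: a force-shutdown taken on this path carries exactly one `ChannelForceClosed` update with `should_broadcast` set, and the update is queued rather than applied inline. A compact sketch of that invariant with simplified stand-in types:

    enum UpdateStep { ChannelForceClosed { should_broadcast: bool } }
    struct MonitorUpdate { updates: Vec<UpdateStep> }

    fn defer_force_close(update: MonitorUpdate, background_events: &mut Vec<MonitorUpdate>) {
        assert_eq!(update.updates.len(), 1);
        match update.updates[0] {
            UpdateStep::ChannelForceClosed { should_broadcast } => assert!(should_broadcast),
        }
        // Replayed later, when the user next calls `timer_tick_occurred`.
        background_events.push(update);
    }
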
@@ -7483,32 +7578,43 @@ where
        ///
        /// # Privacy
        ///
-       /// Uses a one-hop [`BlindedPath`] for the offer with [`ChannelManager::get_our_node_id`] as the
-       /// introduction node and a derived signing pubkey for recipient privacy. As such, currently,
-       /// the node must be announced. Otherwise, there is no way to find a path to the introduction
-       /// node in order to send the [`InvoiceRequest`].
+       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the offer.
+       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+       /// the node must be announced; otherwise, there is no way to find a path to the introduction
+       /// node in order to send the [`InvoiceRequest`].
+       ///
+       /// Also, uses a derived signing pubkey in the offer for recipient privacy.
        ///
        /// # Limitations
        ///
        /// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
        /// reply path.
        ///
+       /// # Errors
+       ///
+       /// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
+       ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Offer`]: crate::offers::offer::Offer
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        pub fn create_offer_builder(
                &self, description: String
-       ) -> OfferBuilder<DerivedMetadata, secp256k1::All> {
+       ) -> Result<OfferBuilder<DerivedMetadata, secp256k1::All>, Bolt12SemanticError> {
                let node_id = self.get_our_node_id();
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
-               let path = self.create_one_hop_blinded_path();
 
-               OfferBuilder::deriving_signing_pubkey(description, node_id, expanded_key, entropy, secp_ctx)
+               let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+               let builder = OfferBuilder::deriving_signing_pubkey(
+                       description, node_id, expanded_key, entropy, secp_ctx
+               )
                        .chain_hash(self.chain_hash)
-                       .path(path)
+                       .path(path);
+
+               Ok(builder)
        }
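
With the signature now fallible, a plausible call site looks like the following sketch; the `amount_msats` and `build` methods are assumed from `OfferBuilder`, and `channel_manager` is assumed in scope:

    let offer = channel_manager
        .create_offer_builder("coffee".to_string())? // may fail with MissingPaths
        .amount_msats(10_000)
        .build()?;
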
 
        /// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
@@ -7533,10 +7639,13 @@ where
        ///
        /// # Privacy
        ///
-       /// Uses a one-hop [`BlindedPath`] for the refund with [`ChannelManager::get_our_node_id`] as
-       /// the introduction node and a derived payer id for payer privacy. As such, currently, the
-       /// node must be announced. Otherwise, there is no way to find a path to the introduction node
-       /// in order to send the [`Bolt12Invoice`].
+       /// Uses [`MessageRouter::create_blinded_paths`] to construct a [`BlindedPath`] for the refund.
+       /// However, if one is not found, uses a one-hop [`BlindedPath`] with
+       /// [`ChannelManager::get_our_node_id`] as the introduction node instead. In the latter case,
+       /// the node must be announced; otherwise, there is no way to find a path to the introduction
+       /// node in order to send the [`Bolt12Invoice`].
+       ///
+       /// Also, uses a derived payer id in the refund for payer privacy.
        ///
        /// # Limitations
        ///
@@ -7545,14 +7654,17 @@ where
        ///
        /// # Errors
        ///
-       /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
-       /// or if `amount_msats` is invalid.
+       /// Errors if:
+       /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+       /// - `amount_msats` is invalid, or
+       /// - the parameterized [`Router`] is unable to create a blinded path for the refund.
        ///
        /// This is not exported to bindings users as builder patterns don't map outside of move semantics.
        ///
        /// [`Refund`]: crate::offers::refund::Refund
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        /// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
+       /// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
        pub fn create_refund_builder(
                &self, description: String, amount_msats: u64, absolute_expiry: Duration,
                payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
@@ -7561,8 +7673,8 @@ where
                let expanded_key = &self.inbound_payment_key;
                let entropy = &*self.entropy_source;
                let secp_ctx = &self.secp_ctx;
-               let path = self.create_one_hop_blinded_path();
 
+               let path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
                let builder = RefundBuilder::deriving_payer_id(
                        description, node_id, expanded_key, entropy, secp_ctx, amount_msats, payment_id
                )?
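
A matching sketch for the refund side, under the same assumptions (`channel_manager`, `absolute_expiry`, `payment_id`, and `retry_strategy` in scope); path-creation failure now surfaces as `Bolt12SemanticError::MissingPaths` before the builder is returned:

    let refund = channel_manager
        .create_refund_builder(
            "refund".to_string(), 10_000, absolute_expiry, payment_id, retry_strategy, None,
        )?
        .build()?;
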
@@ -7620,8 +7732,11 @@ where
        ///
        /// # Errors
        ///
-       /// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link
-       /// or if the provided parameters are invalid for the offer.
+       /// Errors if:
+       /// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
+       /// - the provided parameters are invalid for the offer, or
+       /// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
+       ///   request.
        ///
        /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
        /// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
@@ -7654,9 +7769,8 @@ where
                        None => builder,
                        Some(payer_note) => builder.payer_note(payer_note),
                };
-
                let invoice_request = builder.build_and_sign()?;
-               let reply_path = self.create_one_hop_blinded_path();
+               let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
                let expiration = StaleExpiration::TimerTicks(1);
                self.pending_outbound_payments
@@ -7705,6 +7819,11 @@ where
        /// node meeting the aforementioned criteria, but there's no guarantee that they will be
        /// received and no retries will be made.
        ///
+       /// # Errors
+       ///
+       /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
+       /// path for the invoice.
+       ///
        /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
        pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
                let expanded_key = &self.inbound_payment_key;
@@ -7716,23 +7835,24 @@ where
 
                match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
                        Ok((payment_hash, payment_secret)) => {
-                               let payment_paths = vec![
-                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                               ];
-                               #[cfg(not(feature = "no-std"))]
+                               let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
+                                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
+
+                               #[cfg(feature = "std")]
                                let builder = refund.respond_using_derived_keys(
                                        payment_paths, payment_hash, expanded_key, entropy
                                )?;
-                               #[cfg(feature = "no-std")]
+                               #[cfg(not(feature = "std"))]
                                let created_at = Duration::from_secs(
                                        self.highest_seen_timestamp.load(Ordering::Acquire) as u64
                                );
-                               #[cfg(feature = "no-std")]
+                               #[cfg(not(feature = "std"))]
                                let builder = refund.respond_using_derived_keys_no_std(
                                        payment_paths, payment_hash, created_at, expanded_key, entropy
                                )?;
                                let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
-                               let reply_path = self.create_one_hop_blinded_path();
+                               let reply_path = self.create_blinded_path()
+                                       .map_err(|_| Bolt12SemanticError::MissingPaths)?;
 
                                let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
                                if refund.paths().is_empty() {
@@ -7859,24 +7979,35 @@ where
                inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
        }
 
-       /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
-       /// node.
-       fn create_one_hop_blinded_path(&self) -> BlindedPath {
-               let entropy_source = self.entropy_source.deref();
+       /// Creates a blinded path by delegating to [`MessageRouter::create_blinded_paths`].
+       ///
+       /// Errors if the `MessageRouter` errors or returns an empty `Vec`.
+       fn create_blinded_path(&self) -> Result<BlindedPath, ()> {
+               let recipient = self.get_our_node_id();
                let secp_ctx = &self.secp_ctx;
-               BlindedPath::one_hop_for_message(self.get_our_node_id(), entropy_source, secp_ctx).unwrap()
+
+               let peers = self.per_peer_state.read().unwrap()
+                       .iter()
+                       .filter(|(_, peer)| peer.lock().unwrap().latest_features.supports_onion_messages())
+                       .map(|(node_id, _)| *node_id)
+                       .collect::<Vec<_>>();
+
+               self.router
+                       .create_blinded_paths(recipient, peers, secp_ctx)
+                       .and_then(|paths| paths.into_iter().next().ok_or(()))
        }
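
The helper above narrows the peer set to nodes advertising onion-message support before asking the router, and treats an empty result as an error. A self-contained sketch of that selection logic (the `PathRouter` trait here is a stand-in, not the crate's `MessageRouter`):

    type NodeId = [u8; 33]; // stand-in
    struct BlindedPath;     // stand-in

    trait PathRouter {
        fn create_blinded_paths(&self, recipient: NodeId, peers: Vec<NodeId>)
            -> Result<Vec<BlindedPath>, ()>;
    }

    fn first_blinded_path<R: PathRouter>(
        router: &R,
        recipient: NodeId,
        peers: Vec<(NodeId, bool)>, // (peer id, supports onion messages)
    ) -> Result<BlindedPath, ()> {
        let candidates = peers.into_iter()
            .filter(|(_, supports_om)| *supports_om)
            .map(|(id, _)| id)
            .collect::<Vec<_>>();
        router.create_blinded_paths(recipient, candidates)
            // An empty `Vec` from the router counts as a failure.
            .and_then(|paths| paths.into_iter().next().ok_or(()))
    }
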
 
-       /// Creates a one-hop blinded path with [`ChannelManager::get_our_node_id`] as the introduction
-       /// node.
-       fn create_one_hop_blinded_payment_path(
-               &self, payment_secret: PaymentSecret
-       ) -> (BlindedPayInfo, BlindedPath) {
-               let entropy_source = self.entropy_source.deref();
+       /// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
+       /// [`Router::create_blinded_payment_paths`].
+       fn create_blinded_payment_paths(
+               &self, amount_msats: u64, payment_secret: PaymentSecret
+       ) -> Result<Vec<(BlindedPayInfo, BlindedPath)>, ()> {
                let secp_ctx = &self.secp_ctx;
 
+               let first_hops = self.list_usable_channels();
                let payee_node_id = self.get_our_node_id();
-               let max_cltv_expiry = self.best_block.read().unwrap().height() + LATENCY_GRACE_PERIOD_BLOCKS;
+               let max_cltv_expiry = self.best_block.read().unwrap().height() + CLTV_FAR_FAR_AWAY
+                       + LATENCY_GRACE_PERIOD_BLOCKS;
                let payee_tlvs = ReceiveTlvs {
                        payment_secret,
                        payment_constraints: PaymentConstraints {
@@ -7884,10 +8015,9 @@ where
                                htlc_minimum_msat: 1,
                        },
                };
-               // TODO: Err for overflow?
-               BlindedPath::one_hop_for_payment(
-                       payee_node_id, payee_tlvs, entropy_source, secp_ctx
-               ).unwrap()
+               self.router.create_blinded_payment_paths(
+                       payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
+               )
        }
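
Note the widened CLTV bound: the receive-side constraint now adds `CLTV_FAR_FAR_AWAY` on top of the grace period, leaving room for forwarding nodes' deltas along a multi-hop blinded path. A sketch of the computation (constant values are illustrative assumptions, not necessarily the crate's):

    const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6; // ~two weeks of blocks (assumed)
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // assumed

    struct PaymentConstraints { max_cltv_expiry: u32, htlc_minimum_msat: u64 }

    fn receive_constraints(best_block_height: u32) -> PaymentConstraints {
        PaymentConstraints {
            max_cltv_expiry: best_block_height + CLTV_FAR_FAR_AWAY + LATENCY_GRACE_PERIOD_BLOCKS,
            htlc_minimum_msat: 1,
        }
    }
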
 
        /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
@@ -7992,9 +8122,12 @@ where
        /// [`Event`] being handled) completes, this should be called to restore the channel to normal
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
-       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
+               channel_funding_outpoint: OutPoint, channel_id: ChannelId,
+               mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
+
                let logger = WithContext::from(
-                       &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id())
+                       &self.logger, Some(counterparty_node_id), Some(channel_id),
                );
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -8004,28 +8137,29 @@ where
                                if let Some(blocker) = completed_blocker.take() {
                                        // Only do this on the first iteration of the loop.
                                        if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
-                                               .get_mut(&channel_funding_outpoint.to_channel_id())
+                                               .get_mut(&channel_id)
                                        {
                                                blockers.retain(|iter| iter != &blocker);
                                        }
                                }
 
                                if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
-                                       channel_funding_outpoint, counterparty_node_id) {
+                                       channel_funding_outpoint, channel_id, counterparty_node_id) {
                                        // Check that, while holding the peer lock, we don't have anything else
                                        // blocking monitor updates for this channel. If we do, release the monitor
                                        // update(s) when those blockers complete.
                                        log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
-                                               &channel_funding_outpoint.to_channel_id());
+                                               &channel_id);
                                        break;
                                }
 
-                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+                               if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
+                                       channel_id) {
                                        if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
                                                debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
-                                                               channel_funding_outpoint.to_channel_id());
+                                                               channel_id);
                                                        handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
                                                                peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
@@ -8035,7 +8169,7 @@ where
                                                        }
                                                } else {
                                                        log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
-                                                               channel_funding_outpoint.to_channel_id());
+                                                               channel_id);
                                                }
                                        }
                                }
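
The release loop above follows a simple shape: drop the just-completed blocker on the first pass, stop if anything else still blocks the channel, otherwise replay queued monitor updates until none remain. A control-flow sketch with stand-in types:

    fn release_monitor_updates(
        mut completed_blocker: Option<u32>, // stand-in for RAAMonitorUpdateBlockingAction
        blockers: &mut Vec<u32>,
        queued_updates: &mut Vec<&'static str>,
    ) {
        loop {
            if let Some(blocker) = completed_blocker.take() {
                // Only runs on the first iteration of the loop.
                blockers.retain(|b| *b != blocker);
            }
            if !blockers.is_empty() {
                break; // another blocker must complete before updates fly
            }
            match queued_updates.pop() {
                Some(update) => println!("unblocking monitor update: {update}"),
                None => break, // nothing further to unblock
            }
        }
    }
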
@@ -8052,9 +8186,9 @@ where
                for action in actions {
                        match action {
                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                                       channel_funding_outpoint, counterparty_node_id
+                                       channel_funding_outpoint, channel_id, counterparty_node_id
                                } => {
-                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
+                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
                                }
                        }
                }
@@ -8395,14 +8529,13 @@ where
                                                                update_maps_on_chan_removal!(self, &channel.context);
                                                                // It looks like our counterparty went on-chain or the funding
                                                                // transaction was reorged out of the main chain. Close the channel.
-                                                               failed_channels.push(channel.context.force_shutdown(true));
+                                                               let reason_message = format!("{}", reason);
+                                                               failed_channels.push(channel.context.force_shutdown(true, reason));
                                                                if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
                                                                        pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
                                                                                msg: update
                                                                        });
                                                                }
-                                                               let reason_message = format!("{}", reason);
-                                                               self.issue_channel_close_events(&channel.context, reason);
                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                        node_id: channel.context.get_counterparty_node_id(),
                                                                        action: msgs::ErrorAction::DisconnectPeer {
@@ -8451,6 +8584,7 @@ where
                                                incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
                                                phantom_shared_secret: None,
                                                outpoint: htlc.prev_funding_outpoint,
+                                               channel_id: htlc.prev_channel_id,
                                                blinded_failure: htlc.forward_info.routing.blinded_failure(),
                                        });
 
@@ -8462,7 +8596,7 @@ where
                                                        HTLCFailReason::from_failure_code(0x2000 | 2),
                                                        HTLCDestination::InvalidForward { requested_forward_scid }));
                                        let logger = WithContext::from(
-                                               &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id())
+                                               &self.logger, None, Some(htlc.prev_channel_id)
                                        );
                                        log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
                                        false
@@ -8790,18 +8924,19 @@ where
                                                        }
                                                        &mut chan.context
                                                },
-                                               // Unfunded channels will always be removed.
-                                               ChannelPhase::UnfundedOutboundV1(chan) => {
-                                                       &mut chan.context
+                                               // We retain UnfundedOutboundV1 channels for some time in case the
+                                               // peer unexpectedly disconnects and intends to reconnect.
+                                               ChannelPhase::UnfundedOutboundV1(_) => {
+                                                       return true;
                                                },
+                                               // Unfunded inbound channels will always be removed.
                                                ChannelPhase::UnfundedInboundV1(chan) => {
                                                        &mut chan.context
                                                },
                                        };
                                        // Clean up for removal.
                                        update_maps_on_chan_removal!(self, &context);
-                                       self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
-                                       failed_channels.push(context.force_shutdown(false));
+                                       failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer));
                                        false
                                });
                                // Note that we don't bother generating any events for pre-accept channels -
@@ -8896,8 +9031,8 @@ where
                                                        return NotifyOption::SkipPersistNoEvents;
                                                }
                                                e.insert(Mutex::new(PeerState {
-                                                       channel_by_id: HashMap::new(),
-                                                       inbound_channel_request_by_id: HashMap::new(),
+                                                       channel_by_id: new_hash_map(),
+                                                       inbound_channel_request_by_id: new_hash_map(),
                                                        latest_features: init_msg.features.clone(),
                                                        pending_msg_events: Vec::new(),
                                                        in_flight_monitor_updates: BTreeMap::new(),
@@ -8933,21 +9068,31 @@ where
                                let peer_state = &mut *peer_state_lock;
                                let pending_msg_events = &mut peer_state.pending_msg_events;
 
-                               peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-                                               // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                                               // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-                                               // worry about closing and removing them.
-                                               debug_assert!(false);
-                                               None
+                               for (_, phase) in peer_state.channel_by_id.iter_mut() {
+                                       match phase {
+                                               ChannelPhase::Funded(chan) => {
+                                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
+                                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_channel_reestablish(&&logger),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedOutboundV1(chan) => {
+                                                       pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+                                                               node_id: chan.context.get_counterparty_node_id(),
+                                                               msg: chan.get_open_channel(self.chain_hash),
+                                                       });
+                                               }
+
+                                               ChannelPhase::UnfundedInboundV1(_) => {
+                                                       // Unfunded inbound channel maps are cleared upon disconnecting a peer and
+                                                       // are never persisted (so they won't be recovered after a crash);
+                                                       // therefore, they shouldn't exist at this point.
+                                                       debug_assert!(false);
+                                               }
                                        }
-                               ).for_each(|chan| {
-                                       let logger = WithChannelContext::from(&self.logger, &chan.context);
-                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                               node_id: chan.context.get_counterparty_node_id(),
-                                               msg: chan.get_channel_reestablish(&&logger),
-                                       });
-                               });
+                               }
                        }
 
                        return NotifyOption::SkipPersistHandleEvents;
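
The per-phase behavior on reconnect is now: funded channels re-establish, retained unfunded outbound channels resend `open_channel`, and unfunded inbound channels should already have been dropped. A minimal sketch with stand-in types:

    enum ChannelPhase { Funded, UnfundedOutboundV1, UnfundedInboundV1 }

    fn on_peer_reconnected(phases: &[ChannelPhase], pending_msgs: &mut Vec<&'static str>) {
        for phase in phases {
            match phase {
                ChannelPhase::Funded => pending_msgs.push("channel_reestablish"),
                ChannelPhase::UnfundedOutboundV1 => pending_msgs.push("open_channel"),
                // Cleared on disconnect and never persisted, so unreachable here.
                ChannelPhase::UnfundedInboundV1 => debug_assert!(false),
            }
        }
    }
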
@@ -9127,7 +9272,7 @@ where
                                let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
                                        &invoice_request
                                ) {
-                                       Ok(amount_msats) => Some(amount_msats),
+                                       Ok(amount_msats) => amount_msats,
                                        Err(error) => return Some(OffersMessage::InvoiceError(error.into())),
                                };
                                let invoice_request = match invoice_request.verify(expanded_key, secp_ctx) {
@@ -9137,64 +9282,69 @@ where
                                                return Some(OffersMessage::InvoiceError(error.into()));
                                        },
                                };
-                               let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
 
-                               match self.create_inbound_payment(amount_msats, relative_expiry, None) {
-                                       Ok((payment_hash, payment_secret)) if invoice_request.keys.is_some() => {
-                                               let payment_paths = vec![
-                                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                                               ];
-                                               #[cfg(not(feature = "no-std"))]
-                                               let builder = invoice_request.respond_using_derived_keys(
-                                                       payment_paths, payment_hash
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let created_at = Duration::from_secs(
-                                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let builder = invoice_request.respond_using_derived_keys_no_std(
-                                                       payment_paths, payment_hash, created_at
-                                               );
-                                               match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
-                                                       Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
-                                                       Err(error) => Some(OffersMessage::InvoiceError(error.into())),
-                                               }
-                                       },
-                                       Ok((payment_hash, payment_secret)) => {
-                                               let payment_paths = vec![
-                                                       self.create_one_hop_blinded_payment_path(payment_secret),
-                                               ];
-                                               #[cfg(not(feature = "no-std"))]
-                                               let builder = invoice_request.respond_with(payment_paths, payment_hash);
-                                               #[cfg(feature = "no-std")]
-                                               let created_at = Duration::from_secs(
-                                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
-                                               );
-                                               #[cfg(feature = "no-std")]
-                                               let builder = invoice_request.respond_with_no_std(
-                                                       payment_paths, payment_hash, created_at
-                                               );
-                                               let response = builder.and_then(|builder| builder.allow_mpp().build())
-                                                       .map_err(|e| OffersMessage::InvoiceError(e.into()))
-                                                       .and_then(|invoice|
-                                                               match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
-                                                                       Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
-                                                                       Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
-                                                                                       InvoiceError::from_string("Failed signing invoice".to_string())
-                                                                       )),
-                                                                       Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
-                                                                                       InvoiceError::from_string("Failed invoice signature verification".to_string())
-                                                                       )),
-                                                               });
-                                               match response {
-                                                       Ok(invoice) => Some(invoice),
-                                                       Err(error) => Some(error),
-                                               }
+                               let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+                               let (payment_hash, payment_secret) = match self.create_inbound_payment(
+                                       Some(amount_msats), relative_expiry, None
+                               ) {
+                                       Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
+                                       Err(()) => {
+                                               let error = Bolt12SemanticError::InvalidAmount;
+                                               return Some(OffersMessage::InvoiceError(error.into()));
                                        },
+                               };
+
+                               let payment_paths = match self.create_blinded_payment_paths(
+                                       amount_msats, payment_secret
+                               ) {
+                                       Ok(payment_paths) => payment_paths,
                                        Err(()) => {
-                                               Some(OffersMessage::InvoiceError(Bolt12SemanticError::InvalidAmount.into()))
+                                               let error = Bolt12SemanticError::MissingPaths;
+                                               return Some(OffersMessage::InvoiceError(error.into()));
                                        },
+                               };
+
+                               #[cfg(not(feature = "std"))]
+                               let created_at = Duration::from_secs(
+                                       self.highest_seen_timestamp.load(Ordering::Acquire) as u64
+                               );
+
+                               if invoice_request.keys.is_some() {
+                                       #[cfg(feature = "std")]
+                                       let builder = invoice_request.respond_using_derived_keys(
+                                               payment_paths, payment_hash
+                                       );
+                                       #[cfg(not(feature = "std"))]
+                                       let builder = invoice_request.respond_using_derived_keys_no_std(
+                                               payment_paths, payment_hash, created_at
+                                       );
+                                       match builder.and_then(|b| b.allow_mpp().build_and_sign(secp_ctx)) {
+                                               Ok(invoice) => Some(OffersMessage::Invoice(invoice)),
+                                               Err(error) => Some(OffersMessage::InvoiceError(error.into())),
+                                       }
+                               } else {
+                                       #[cfg(feature = "std")]
+                                       let builder = invoice_request.respond_with(payment_paths, payment_hash);
+                                       #[cfg(not(feature = "std"))]
+                                       let builder = invoice_request.respond_with_no_std(
+                                               payment_paths, payment_hash, created_at
+                                       );
+                                       let response = builder.and_then(|builder| builder.allow_mpp().build())
+                                               .map_err(|e| OffersMessage::InvoiceError(e.into()))
+                                               .and_then(|invoice|
+                                                       match invoice.sign(|invoice| self.node_signer.sign_bolt12_invoice(invoice)) {
+                                                               Ok(invoice) => Ok(OffersMessage::Invoice(invoice)),
+                                                               Err(SignError::Signing(())) => Err(OffersMessage::InvoiceError(
+                                                                               InvoiceError::from_string("Failed signing invoice".to_string())
+                                                               )),
+                                                               Err(SignError::Verification(_)) => Err(OffersMessage::InvoiceError(
+                                                                               InvoiceError::from_string("Failed invoice signature verification".to_string())
+                                                               )),
+                                                       });
+                                       match response {
+                                               Ok(invoice) => Some(invoice),
+                                               Err(error) => Some(error),
+                                       }
                                }
                        },
                        OffersMessage::Invoice(invoice) => {
@@ -9281,6 +9431,7 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
        features.set_channel_type_optional();
        features.set_scid_privacy_optional();
        features.set_zero_conf_optional();
+       features.set_route_blinding_optional();
        if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
                features.set_anchors_zero_fee_htlc_tx_optional();
        }
@@ -9340,6 +9491,8 @@ impl Writeable for ChannelDetails {
                        (37, user_channel_id_high_opt, option),
                        (39, self.feerate_sat_per_1000_weight, option),
                        (41, self.channel_shutdown_state, option),
+                       (43, self.pending_inbound_htlcs, optional_vec),
+                       (45, self.pending_outbound_htlcs, optional_vec),
                });
                Ok(())
        }
@@ -9378,6 +9531,8 @@ impl Readable for ChannelDetails {
                        (37, user_channel_id_high_opt, option),
                        (39, feerate_sat_per_1000_weight, option),
                        (41, channel_shutdown_state, option),
+                       (43, pending_inbound_htlcs, optional_vec),
+                       (45, pending_outbound_htlcs, optional_vec),
                });
 
                // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
@@ -9414,6 +9569,8 @@ impl Readable for ChannelDetails {
                        inbound_htlc_maximum_msat,
                        feerate_sat_per_1000_weight,
                        channel_shutdown_state,
+                       pending_inbound_htlcs: pending_inbound_htlcs.unwrap_or(Vec::new()),
+                       pending_outbound_htlcs: pending_outbound_htlcs.unwrap_or(Vec::new()),
                })
        }
 }
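
The new TLV types 43 and 45 are odd, so older readers skip them, and `optional_vec` decodes an absent field to `None`; the constructor then maps that to an empty list. A sketch of the read-side default, with stand-in types:

    struct ChannelDetailsLite {
        pending_inbound_htlcs: Vec<u64>, // stand-ins for the HTLC detail types
        pending_outbound_htlcs: Vec<u64>,
    }

    fn from_optional_tlvs(
        inbound: Option<Vec<u64>>, outbound: Option<Vec<u64>>,
    ) -> ChannelDetailsLite {
        ChannelDetailsLite {
            // Data written before these fields existed decodes as `None`.
            pending_inbound_htlcs: inbound.unwrap_or(Vec::new()),
            pending_outbound_htlcs: outbound.unwrap_or(Vec::new()),
        }
    }
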
@@ -9426,6 +9583,7 @@ impl_writeable_tlv_based!(PhantomRouteHints, {
 
 impl_writeable_tlv_based!(BlindedForward, {
        (0, inbound_blinding_point, required),
+       (1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
 });
 
 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
@@ -9547,6 +9705,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
        (4, htlc_id, required),
        (6, incoming_packet_shared_secret, required),
        (7, user_channel_id, option),
+       // Note that by the time we get past the required read for type 2 above, outpoint will be
+       // filled in, so we can safely unwrap it here.
+       (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
 });
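
The `default_value` works because a v1 channel id is recomputable from the funding outpoint: per BOLT 2, it is the funding txid with its last two bytes XORed against the big-endian output index. A sketch of that derivation (the txid's byte order follows its wire serialization; treat details as illustrative):

    fn v1_channel_id(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        // The output index alters only the two lowest-order bytes.
        id[30] ^= (output_index >> 8) as u8;
        id[31] ^= (output_index & 0xff) as u8;
        id
    }
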
 
 impl Writeable for ClaimableHTLC {
@@ -9698,6 +9859,9 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
        (2, prev_short_channel_id, required),
        (4, prev_htlc_id, required),
        (6, prev_funding_outpoint, required),
+       // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
+       // filled in, so we can safely unwrap it here.
+       (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
 });
 
 impl Writeable for HTLCForwardInfo {
@@ -9941,7 +10105,7 @@ where
                }
 
                // Encode without retry info for 0.0.101 compatibility.
-               let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+               let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
                for (id, outbound) in pending_outbound_payments.iter() {
                        match outbound {
                                PendingOutboundPayment::Legacy { session_privs } |
@@ -9969,7 +10133,7 @@ where
                for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
                        for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
                                if !updates.is_empty() {
-                                       if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(HashMap::new()); }
+                                       if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
                                        in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
                                }
                        }
@@ -10158,7 +10322,9 @@ where
                        mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>) -> Self {
                Self {
                        entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
-                       channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
+                       channel_monitors: hash_map_from_iter(
+                               channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
+                       ),
                }
        }
 }
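
The `new_hash_map`/`hash_map_with_capacity`/`hash_map_from_iter` constructors replacing direct `HashMap` calls presumably centralize hasher choice in one place; a minimal sketch of such wrappers, under that assumption:

    use std::collections::{HashMap, HashSet};
    use std::hash::Hash;

    // Assumed shape: thin wrappers giving a single point to swap in an
    // alternate hasher (e.g. for no-std or fuzzing builds).
    fn new_hash_map<K: Hash + Eq, V>() -> HashMap<K, V> { HashMap::new() }
    fn hash_map_with_capacity<K: Hash + Eq, V>(cap: usize) -> HashMap<K, V> {
        HashMap::with_capacity(cap)
    }
    fn hash_set_with_capacity<K: Hash + Eq>(cap: usize) -> HashSet<K> {
        HashSet::with_capacity(cap)
    }
    fn hash_map_from_iter<K: Hash + Eq, V, I: IntoIterator<Item = (K, V)>>(
        iter: I,
    ) -> HashMap<K, V> {
        HashMap::from_iter(iter)
    }
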
@@ -10205,18 +10371,20 @@ where
                let mut failed_htlcs = Vec::new();
 
                let channel_count: u64 = Readable::read(reader)?;
-               let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
+               let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+               let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
+               let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = VecDeque::new();
                let mut close_background_events = Vec::new();
+               let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
                for _ in 0..channel_count {
                        let mut channel: Channel<SP> = Channel::read(reader, (
                                &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
                        ))?;
                        let logger = WithChannelContext::from(&args.logger, &channel.context);
                        let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+                       funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
                                if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
@@ -10242,13 +10410,13 @@ where
                                                log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
                                                        &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
                                        }
-                                       let mut shutdown_result = channel.context.force_shutdown(true);
+                                       let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
                                        if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
                                                return Err(DecodeError::InvalidValue);
                                        }
-                                       if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
+                                       if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update {
                                                close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-                                                       counterparty_node_id, funding_txo, update
+                                                       counterparty_node_id, funding_txo, channel_id, update
                                                });
                                        }
                                        failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
@@ -10258,6 +10426,7 @@ where
                                                reason: ClosureReason::OutdatedChannelManager,
                                                counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                                channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                               channel_funding_txo: channel.context.get_funding_txo(),
                                        }, None));
                                        for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
                                                let mut found_htlc = false;
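For context on the comparison driving this force-close branch: commitment transaction numbers in LDK count down from 2^48 - 1 as new states are signed, so a `Channel` sitting at a higher number than its `ChannelMonitor` holds strictly older state, and the manager must defer to the monitor. A hedged restatement with bare integers:

	// Higher commitment number == older state, since numbers decrement.
	fn channel_manager_is_stale(
		chan_holder_num: u64, monitor_holder_num: u64,
		chan_counterparty_num: u64, monitor_counterparty_num: u64,
	) -> bool {
		chan_holder_num > monitor_holder_num || chan_counterparty_num > monitor_counterparty_num
	}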
@@ -10285,8 +10454,8 @@ where
                                        if let Some(short_channel_id) = channel.context.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
                                        }
-                                       if channel.context.is_funding_broadcast() {
-                                               id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
+                                       if let Some(funding_txo) = channel.context.get_funding_txo() {
+                                               outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
                                        }
                                        match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
                                                hash_map::Entry::Occupied(mut entry) => {
@@ -10294,7 +10463,7 @@ where
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                },
                                                hash_map::Entry::Vacant(entry) => {
-                                                       let mut by_id_map = HashMap::new();
+                                                       let mut by_id_map = new_hash_map();
                                                        by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
                                                        entry.insert(by_id_map);
                                                }
@@ -10304,13 +10473,14 @@ where
                                // If we were persisted and shut down while the initial ChannelMonitor persistence
                                // was in-progress, we never broadcasted the funding transaction and can still
                                // safely discard the channel.
-                               let _ = channel.context.force_shutdown(false);
+                               let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
                                channel_closures.push_back((events::Event::ChannelClosed {
                                        channel_id: channel.context.channel_id(),
                                        user_channel_id: channel.context.get_user_id(),
                                        reason: ClosureReason::DisconnectedPeer,
                                        counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
                                        channel_capacity_sats: Some(channel.context.get_value_satoshis()),
+                                       channel_funding_txo: channel.context.get_funding_txo(),
                                }, None));
                        } else {
                                log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
@@ -10325,19 +10495,22 @@ where
                for (funding_txo, monitor) in args.channel_monitors.iter() {
                        if !funding_txo_set.contains(funding_txo) {
                                let logger = WithChannelMonitor::from(&args.logger, monitor);
+                               let channel_id = monitor.channel_id();
                                log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
-                                       &funding_txo.to_channel_id());
+                                       &channel_id);
                                let monitor_update = ChannelMonitorUpdate {
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
+                                       counterparty_node_id: None,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+                                       channel_id: Some(monitor.channel_id()),
                                };
-                               close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
                        }
                }
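The loop above amounts to a set-difference sweep: any persisted `ChannelMonitor` whose funding outpoint was not claimed by a deserialized channel is presumed to belong to a closed channel and gets a queued force-close update. A minimal sketch of that shape, with `u32` standing in for `OutPoint`:

	use std::collections::HashSet;

	// Monitors not matched to a live channel are treated as closed channels.
	fn orphaned_monitors(all_monitor_txos: &[u32], live_channel_txos: &HashSet<u32>) -> Vec<u32> {
		all_monitor_txos.iter().copied()
			.filter(|txo| !live_channel_txos.contains(txo))
			.collect()
	}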
 
                const MAX_ALLOC_SIZE: usize = 1024 * 64;
                let forward_htlcs_count: u64 = Readable::read(reader)?;
-               let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+               let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
                for _ in 0..forward_htlcs_count {
                        let short_channel_id = Readable::read(reader)?;
                        let pending_forwards_count: u64 = Readable::read(reader)?;
@@ -10363,7 +10536,7 @@ where
                let peer_state_from_chans = |channel_by_id| {
                        PeerState {
                                channel_by_id,
-                               inbound_channel_request_by_id: HashMap::new(),
+                               inbound_channel_request_by_id: new_hash_map(),
                                latest_features: InitFeatures::empty(),
                                pending_msg_events: Vec::new(),
                                in_flight_monitor_updates: BTreeMap::new(),
@@ -10374,10 +10547,10 @@ where
                };
 
                let peer_count: u64 = Readable::read(reader)?;
-               let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
+               let mut per_peer_state = hash_map_with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
                for _ in 0..peer_count {
                        let peer_pubkey = Readable::read(reader)?;
-                       let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+                       let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
                        let mut peer_state = peer_state_from_chans(peer_chans);
                        peer_state.latest_features = Readable::read(reader)?;
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -10411,7 +10584,7 @@ where
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
 
                let pending_inbound_payment_count: u64 = Readable::read(reader)?;
-               let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+               let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = hash_map_with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
                for _ in 0..pending_inbound_payment_count {
                        if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
                                return Err(DecodeError::InvalidValue);
@@ -10420,11 +10593,11 @@ where
 
                let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
                let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
-                       HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+                       hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
                for _ in 0..pending_outbound_payments_count_compat {
                        let session_priv = Readable::read(reader)?;
                        let payment = PendingOutboundPayment::Legacy {
-                               session_privs: [session_priv].iter().cloned().collect()
+                               session_privs: hash_set_from_iter([session_priv]),
                        };
                        if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
                                return Err(DecodeError::InvalidValue)
@@ -10434,13 +10607,13 @@ where
                // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
                let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
                let mut pending_outbound_payments = None;
-               let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
+               let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
                let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                let mut claimable_htlc_onion_fields = None;
-               let mut pending_claiming_payments = Some(HashMap::new());
+               let mut pending_claiming_payments = Some(new_hash_map());
                let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
                let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
@@ -10477,7 +10650,7 @@ where
                if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
                } else if pending_outbound_payments.is_none() {
-                       let mut outbounds = HashMap::new();
+                       let mut outbounds = new_hash_map();
                        for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
                                outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
                        }
@@ -10508,12 +10681,13 @@ where
                                $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
                                for update in $chan_in_flight_upds.iter() {
                                        log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
-                                               update.update_id, $channel_info_log, &$funding_txo.to_channel_id());
+                                               update.update_id, $channel_info_log, &$monitor.channel_id());
                                        max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
                                                        counterparty_node_id: $counterparty_node_id,
                                                        funding_txo: $funding_txo,
+                                                       channel_id: $monitor.channel_id(),
                                                        update: update.clone(),
                                                });
                                }
@@ -10524,7 +10698,7 @@ where
                                        pending_background_events.push(
                                                BackgroundEvent::MonitorUpdatesComplete {
                                                        counterparty_node_id: $counterparty_node_id,
-                                                       channel_id: $funding_txo.to_channel_id(),
+                                                       channel_id: $monitor.channel_id(),
                                                });
                                }
                                if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
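The `handle_in_flight_updates` macro shown across these hunks replays only the monitor updates the persisted `ChannelMonitor` has not yet applied: anything at or below `get_latest_update_id()` is filtered out before the remainder is queued as background events. A hedged sketch of the filter with bare update ids:

	// Drop updates the monitor already applied; keep the rest for replay.
	fn updates_to_replay(mut queued_update_ids: Vec<u64>, latest_applied: u64) -> Vec<u64> {
		queued_update_ids.retain(|id| *id > latest_applied);
		queued_update_ids
	}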
@@ -10578,21 +10752,22 @@ where
 
                if let Some(in_flight_upds) = in_flight_monitor_updates {
                        for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
-                               let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id()));
+                               let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
+                               let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id);
                                if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
                                        // Now that we've removed all the in-flight monitor updates for channels that are
                                        // still open, we need to replay any monitor updates that are for closed channels,
                                        // creating the necessary peer_state entries as we go.
                                        let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-                                               Mutex::new(peer_state_from_chans(HashMap::new()))
+                                               Mutex::new(peer_state_from_chans(new_hash_map()))
                                        });
                                        let mut peer_state = peer_state_mutex.lock().unwrap();
                                        handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
                                                funding_txo, monitor, peer_state, logger, "closed ");
                                } else {
                                        log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
-                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
-                                               &funding_txo.to_channel_id());
+                                       log_error!(logger, " The ChannelMonitor for channel {} is missing.",
+                                               if let Some(channel_id) = channel_id { channel_id.to_string() }
+                                               else { format!("with outpoint {}", funding_txo) });
                                        log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
                                        log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
                                        log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
@@ -10620,7 +10795,7 @@ where
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
                        for (_, monitor) in args.channel_monitors.iter() {
-                               let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+                               let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
                                if counterparty_opt.is_none() {
                                        let logger = WithChannelMonitor::from(&args.logger, monitor);
                                        for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
@@ -10645,7 +10820,7 @@ where
                                                                                retry_strategy: None,
                                                                                attempts: PaymentAttempts::new(),
                                                                                payment_params: None,
-                                                                               session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
+                                                                               session_privs: hash_set_from_iter([session_priv_bytes]),
                                                                                payment_hash: htlc.payment_hash,
                                                                                payment_secret: None, // only used for retries, and we'll never retry on startup
                                                                                payment_metadata: None, // only used for retries, and we'll never retry on startup
@@ -10680,7 +10855,7 @@ where
                                                                                if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
                                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                                log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+                                                                                                       &htlc.payment_hash, &monitor.channel_id());
                                                                                                false
                                                                                        } else { true }
                                                                                } else { true }
@@ -10690,7 +10865,7 @@ where
                                                                pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
                                                                        if pending_forward_matches_htlc(&htlc_info) {
                                                                                log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-                                                                                       &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id());
+                                                                                       &htlc.payment_hash, &monitor.channel_id());
                                                                                pending_events_read.retain(|(event, _)| {
                                                                                        if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
                                                                                                intercepted_id != ev_id
@@ -10714,6 +10889,7 @@ where
                                                                        let compl_action =
                                                                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                                                                        channel_funding_outpoint: monitor.get_funding_txo().0,
+                                                                                       channel_id: monitor.channel_id(),
                                                                                        counterparty_node_id: path.hops[0].pubkey,
                                                                                };
                                                                        pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
@@ -10739,7 +10915,7 @@ where
                                                                        // channel_id -> peer map entry).
                                                                        counterparty_opt.is_none(),
                                                                        counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
-                                                                       monitor.get_funding_txo().0))
+                                                                       monitor.get_funding_txo().0, monitor.channel_id()))
                                                        } else { None }
                                                } else {
                                                        // If it was an outbound payment, we've handled it above - if a preimage
@@ -10769,7 +10945,7 @@ where
                let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
                let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
 
-               let mut claimable_payments = HashMap::with_capacity(claimable_htlcs_list.len());
+               let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
                if let Some(purposes) = claimable_htlc_purposes {
                        if purposes.len() != claimable_htlcs_list.len() {
                                return Err(DecodeError::InvalidValue);
@@ -10842,7 +11018,7 @@ where
                        }
                }
 
-               let mut outbound_scid_aliases = HashSet::new();
+               let mut outbound_scid_aliases = new_hash_set();
                for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                        let peer_state = &mut *peer_state_lock;
@@ -10912,8 +11088,8 @@ where
                                                // this channel as well. On the flip side, there's no harm in restarting
                                                // without the new monitor persisted - we'll end up right back here on
                                                // restart.
-                                               let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
-                                               if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id){
+                                               let previous_channel_id = claimable_htlc.prev_hop.channel_id;
+                                               if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
                                                        let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
@@ -10945,14 +11121,14 @@ where
                                        for action in actions.iter() {
                                                if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
                                                        downstream_counterparty_and_funding_outpoint:
-                                                               Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+                                                               Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
                                                } = action {
-                                                       if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+                                                       if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
                                                                log_trace!(logger,
                                                                        "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
-                                                                       blocked_channel_outpoint.to_channel_id());
+                                                                       blocked_channel_id);
                                                                blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
-                                                                       .entry(blocked_channel_outpoint.to_channel_id())
+                                                                       .entry(*blocked_channel_id)
                                                                        .or_insert_with(Vec::new).push(blocking_action.clone());
                                                        } else {
                                                                // If the channel we were blocking has closed, we don't need to
@@ -10991,7 +11167,7 @@ where
                        forward_htlcs: Mutex::new(forward_htlcs),
                        claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
-                       id_to_peer: Mutex::new(id_to_peer),
+                       outpoint_to_peer: Mutex::new(outpoint_to_peer),
                        short_to_chan_info: FairRwLock::new(short_to_chan_info),
                        fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
@@ -11032,12 +11208,12 @@ where
                        channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
 
-               for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+               for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
                        // We use `downstream_closed` in place of `from_onchain` here just as a guess - we
                        // don't remember in the `ChannelMonitor` where we got a preimage from, but if the
                        // channel is closed we just assume that it probably came from an on-chain claim.
-                       channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-                               downstream_closed, true, downstream_node_id, downstream_funding);
+                       channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+                               downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -11610,8 +11786,8 @@ mod tests {
        }
 
        #[test]
-       fn test_id_to_peer_coverage() {
-               // Test that the `ChannelManager:id_to_peer` contains channels which have been assigned
+       fn test_outpoint_to_peer_coverage() {
+               // Test that the `ChannelManager::outpoint_to_peer` map contains channels which have been assigned
                // a `channel_id` (i.e. have had the funding tx created), and that they are removed once
                // the channel is successfully closed.
                let chanmon_cfgs = create_chanmon_cfgs(2);
@@ -11625,42 +11801,42 @@ mod tests {
                let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
                nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
 
-               let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+               let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
                let channel_id = ChannelId::from_bytes(tx.txid().to_byte_array());
                {
-                       // Ensure that the `id_to_peer` map is empty until either party has received the
+                       // Ensure that the `outpoint_to_peer` map is empty until either party has received the
                        // funding transaction, and has the real `channel_id`.
-                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
-                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
                }
 
                nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
                {
-                       // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+                       // Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon
                        // as it has the funding transaction.
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
 
-               assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
                let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 
                nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
                {
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
                expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
 
                {
-                       // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
-                       // as it has the funding transaction.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       // Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
+                       // soon as it has the funding transaction.
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
                check_added_monitors!(nodes[1], 1);
                let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
@@ -11679,23 +11855,23 @@ mod tests {
                let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
                {
-                       // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+                       // Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
                        // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
                        // fee for the closing transaction has been negotiated, and each party has the other
                        // party's signature for the fee-negotiated closing transaction).
-                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
-                       assert!(nodes_0_lock.contains_key(&channel_id));
+                       assert!(nodes_0_lock.contains_key(&funding_output));
                }
 
                {
                        // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
                        // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
                        // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
-                       // kept in the `nodes[1]`'s `id_to_peer` map.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       // kept in the `nodes[1]`'s `outpoint_to_peer` map.
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
 
                nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
@@ -11703,29 +11879,29 @@ mod tests {
                        // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
                        // therefore has all it needs to fully close the channel (both signatures for the
                        // closing transaction).
-                       // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+                       // Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
                        // fully closed by `nodes[0]`.
-                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
 
-                       // Assert that the channel is still in `nodes[1]`'s  `id_to_peer` map, as `nodes[1]`
+                       // Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
                        // doesn't have `nodes[0]`'s signature for the closing transaction yet.
-                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
                        assert_eq!(nodes_1_lock.len(), 1);
-                       assert!(nodes_1_lock.contains_key(&channel_id));
+                       assert!(nodes_1_lock.contains_key(&funding_output));
                }
 
                let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 
                nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
                {
-                       // Assert that the channel has now been removed from both parties `id_to_peer` map once
+                       // Assert that the channel has now been removed from both parties' `outpoint_to_peer` maps once
                        // they both have everything required to fully close the channel.
-                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
                }
                let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 
-               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
-               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+               check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
        }
 
        fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
@@ -12049,8 +12225,8 @@ mod tests {
                let sender_intended_amt_msat = 100;
                let extra_fee_msat = 10;
                let hop_data = msgs::InboundOnionPayload::Receive {
-                       amt_msat: 100,
-                       outgoing_cltv_value: 42,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 42,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {
@@ -12061,7 +12237,7 @@ mod tests {
                // Check that if the amount we received + the penultimate hop extra fee is less than the sender
                // intended amount, we fail the payment.
                let current_height: u32 = node[0].node.best_block.read().unwrap().height();
-               if let Err(crate::ln::channelmanager::InboundOnionErr { err_code, .. }) =
+               if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
                        create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
                                sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
                                current_height, node[0].node.default_configuration.accept_mpp_keysend)
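For the arithmetic this test exercises, assuming the check reduces to the comparison below: with `sender_intended_amt_msat = 100` and `extra_fee_msat = 10`, receiving 89 msat (one below intended minus fee) must fail, while 90 msat is accepted:

	fn underpaid(amt_received_msat: u64, penultimate_fee_msat: u64, sender_intended_msat: u64) -> bool {
		// Fail if the received amount plus the known skimmed fee still falls
		// short of what the sender claims to have sent.
		amt_received_msat + penultimate_fee_msat < sender_intended_msat
	}

	// underpaid(89, 10, 100) == true; underpaid(90, 10, 100) == false.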
@@ -12071,8 +12247,8 @@ mod tests {
 
                // If amt_received + extra_fee is equal to the sender intended amount, we're fine.
                let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
-                       amt_msat: 100,
-                       outgoing_cltv_value: 42,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 42,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {
@@ -12095,8 +12271,8 @@ mod tests {
 
                let current_height: u32 = node[0].node.best_block.read().unwrap().height();
                let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
-                       amt_msat: 100,
-                       outgoing_cltv_value: 22,
+                       sender_intended_htlc_amt_msat: 100,
+                       cltv_expiry_height: 22,
                        payment_metadata: None,
                        keysend_preimage: None,
                        payment_data: Some(msgs::FinalOnionHopData {
@@ -12375,7 +12551,7 @@ mod tests {
 
 
                let (scid_1, scid_2) = (42, 43);
-               let mut forward_htlcs = HashMap::new();
+               let mut forward_htlcs = new_hash_map();
                forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
                forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
 
@@ -12414,7 +12590,7 @@ pub mod bench {
        use bitcoin::blockdata::locktime::absolute::LockTime;
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
-       use bitcoin::{Block, Transaction, TxOut};
+       use bitcoin::{Transaction, TxOut};
 
        use crate::sync::{Arc, Mutex, RwLock};
 
@@ -12454,7 +12630,7 @@ pub mod bench {
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
                let scorer = RwLock::new(test_utils::TestScorer::new());
-               let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
+               let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
 
                let mut config: UserConfig = Default::default();
                config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);