Merge pull request #1638 from ViktorTigerstrom/2022-07-update-decode-update-add-htlc...
[rust-lightning] / lightning / src / ln / channelmanager.rs
index 9e5f7e9d614f3db891f7597a1289ec62a35b29a1..692d46eda90e8937ec289eed73f47fff6c8d5c8c 100644 (file)
 //! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
 //! upon reconnect to the relevant peer(s).
 //!
-//! It does not manage routing logic (see routing::router::get_route for that) nor does it manage constructing
+//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
 //! on-chain transactions (it only monitors the chain to watch for any force-closes that might
 //! imply it needs to fail HTLCs/payments/channels it manages).
 //!
+//! [`find_route`]: crate::routing::router::find_route
 
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 
-use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::Hash;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{BlockHash, Txid};
@@ -35,7 +36,7 @@ use bitcoin::secp256k1;
 
 use chain;
 use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
@@ -47,11 +48,11 @@ use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParame
 use ln::msgs;
 use ln::msgs::NetAddress;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
-use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::config::{UserConfig, ChannelConfig};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::{byte_utils, events};
 use util::scid_utils::fake_scid;
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
@@ -169,7 +170,7 @@ enum OnionPayload {
        Invoice {
                /// This is only here for backwards-compatibility in serialization; in the future it can be
                /// removed, breaking clients running 0.0.106 and earlier.
-               _legacy_hop_data: msgs::FinalOnionHopData,
+               _legacy_hop_data: Option<msgs::FinalOnionHopData>,
        },
        /// Contains the payer-provided preimage.
        Spontaneous(PaymentPreimage),
@@ -280,7 +281,7 @@ enum ClaimFundsFromHop {
        DuplicateClaim,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
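
The two added tuple fields are the counterparty's node id and the channel id on which the HTLC sat, letting failure handling attribute each failed-back HTLC to a specific channel. A minimal sketch of consuming the widened tuple, mirroring `finish_force_close_channel` later in this diff:

```rust
// Each failed-back HTLC now identifies its channel, so the failure can be
// reported against it (names as used later in this diff):
for (source, payment_hash, counterparty_node_id, channel_id) in failed_htlcs.drain(..) {
    let receiver = HTLCDestination::NextHopChannel {
        node_id: Some(counterparty_node_id),
        channel_id,
    };
    // ...fail the HTLC backwards, passing `receiver` for event reporting...
}
```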
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -368,15 +369,6 @@ impl MsgHandleErrInternal {
                                                },
                                        },
                                },
-                               ChannelError::CloseDelayBroadcast(msg) => LightningError {
-                                       err: msg.clone(),
-                                       action: msgs::ErrorAction::SendErrorMessage {
-                                               msg: msgs::ErrorMessage {
-                                                       channel_id,
-                                                       data: msg
-                                               },
-                                       },
-                               },
                        },
                        chan_id: None,
                        shutdown_finish: None,
@@ -405,10 +397,12 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-       /// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
-       /// here once the channel is available for normal use, with SCIDs being added once the funding
-       /// transaction is confirmed at the channel's required confirmation depth.
-       pub(super) short_to_id: HashMap<u64, [u8; 32]>,
+       /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+       ///
+       /// Outbound SCID aliases are added here once the channel is available for normal use, with
+       /// SCIDs being added once the funding transaction is confirmed at the channel's required
+       /// confirmation depth.
+       pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
        /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
        ///
        /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
@@ -419,11 +413,13 @@ pub(super) struct ChannelHolder<Signer: Sign> {
        /// guarantees are made about the existence of a channel with the short id here, nor the short
        /// ids in the PendingHTLCInfo!
        pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
-       /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
+       /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+       /// failed/claimed by the user.
+       ///
        /// Note that while this is held in the same mutex as the channels themselves, no consistency
        /// guarantees are made about the channels given here actually existing anymore by the time you
        /// go to read them!
-       claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
+       claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
        /// Messages to send to peers - pushed to in the same lock that they are generated in (except
        /// for broadcast messages, where ordering isn't as strict).
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -692,7 +688,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 {
        default_configuration: UserConfig,
        genesis_hash: BlockHash,
-       fee_estimator: F,
+       fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
 
@@ -734,6 +730,25 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// active channel list on load.
        outbound_scid_aliases: Mutex<HashSet<u64>>,
 
+       /// `channel_id` -> `counterparty_node_id`.
+       ///
+       /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
+       /// multiple channels with the same `temporary_channel_id` to different peers can exist,
+       /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+       ///
+       /// Note that this map should only be used for `MonitorEvent` handling, to be able to access
+       /// the corresponding channel for the event, as we only have access to the `channel_id` during
+       /// the handling of the events.
+       ///
+       /// TODO:
+       /// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
+       /// to make `counterparty_node_id` a required field in `ChannelMonitor`s, which unfortunately
+       /// would break backwards compatibility.
+       /// We should eventually add `counterparty_node_id`s to `MonitorEvent`s and rely on them,
+       /// making this map redundant, as only the `ChannelManager::per_peer_state` would be
+       /// required to access the channel with the `counterparty_node_id`.
+       id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+
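A hedged sketch of the lookup this map enables during `MonitorEvent` handling, where only a `channel_id` is in hand (the surrounding plumbing is illustrative, not this file's exact code):

```rust
// Illustrative only: resolve which peer a monitor event's channel belongs to.
let counterparty_node_id: Option<PublicKey> =
    self.id_to_peer.lock().unwrap().get(&channel_id).cloned();
// With the peer known, the channel itself could then be fetched via per-peer
// state once MonitorEvents carry the counterparty directly.
```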
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
 
@@ -746,6 +761,11 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// [fake scids]: crate::util::scid_utils::fake_scid
        fake_scid_rand_bytes: [u8; 32],
 
+       /// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
+       /// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
+       /// keeping additional state.
+       probing_cookie_secret: [u8; 32],
+
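The derivation, as implemented by `probing_cookie_from_id` later in this diff:

```rust
// probe_payment_hash = SHA256(probing_cookie_secret || payment_id)
let mut preimage = [0u8; 64];
preimage[..32].copy_from_slice(&probing_cookie_secret);
preimage[32..].copy_from_slice(&payment_id.0);
let probe_payment_hash = PaymentHash(Sha256::hash(&preimage).into_inner());
```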
        /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
        /// value increases strictly since we don't assume access to a time source.
        last_node_announcement_serial: AtomicUsize,
@@ -867,7 +887,13 @@ pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
-pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+// This should be long enough to allow a payment path drawn across multiple routing hops with a
+// substantial `cltv_expiry_delta`. Indeed, the sum of those values is the reaction delay offered to a
+// routing node in case of HTLC on-chain settlement. While appearing less competitive, a node operator
+// could decide to scale them up to suit its security policy. At the network level, we shouldn't
+// constrain them too much, while avoiding the introduction of a DoS vector. Further, a low
+// CLTV_FAR_FAR_AWAY could be a source of routing failures for any HTLC sender picking an LDK node
+// among the first hops.
+pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
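+// For scale: at Bitcoin's target rate of ~6 blocks per hour, 14 * 24 * 6 is 2016 blocks,
+// i.e. roughly two weeks, doubling the previous 6 * 24 * 7 = 1008 blocks (about one week).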
 
 /// Minimum CLTV difference between the current block height and received inbound payments.
 /// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
@@ -1072,18 +1098,18 @@ pub struct ChannelDetails {
        pub force_close_spend_delay: Option<u16>,
        /// True if the channel was initiated (and thus funded) by us.
        pub is_outbound: bool,
-       /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
-       /// channel is not currently being shut down. `funding_locked` message exchange implies the
+       /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
+       /// channel is not currently being shut down. `channel_ready` message exchange implies the
        /// required confirmation count has been reached (and we were connected to the peer at some
        /// point after the funding transaction received enough confirmations). The required
        /// confirmation count is provided in [`confirmations_required`].
        ///
        /// [`confirmations_required`]: ChannelDetails::confirmations_required
-       pub is_funding_locked: bool,
-       /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+       pub is_channel_ready: bool,
+       /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
        /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
        ///
-       /// This is a strict superset of `is_funding_locked`.
+       /// This is a strict superset of `is_channel_ready`.
        pub is_usable: bool,
        /// True if this channel is (or will be) publicly-announced.
        pub is_public: bool,
@@ -1092,6 +1118,10 @@ pub struct ChannelDetails {
        pub inbound_htlc_minimum_msat: Option<u64>,
        /// The largest value HTLC (in msat) we currently will accept, for this channel.
        pub inbound_htlc_maximum_msat: Option<u64>,
+       /// Set of configurable parameters that affect channel operation.
+       ///
+       /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
+       pub config: Option<ChannelConfig>,
 }
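
A hedged usage sketch reading the new field; the `ChannelConfig` field name below is an assumption based on the 0.0.109-era API:

```rust
// `config` is None only for ChannelDetails deserialized from LDK < 0.0.109.
for chan in channel_manager.list_channels() {
    if let Some(cfg) = chan.config {
        // e.g., inspect the proportional fee charged for forwards on this channel
        let _ppm = cfg.forwarding_fee_proportional_millionths;
    }
}
```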
 
 impl ChannelDetails {
@@ -1226,9 +1256,9 @@ macro_rules! handle_error {
 }
 
 macro_rules! update_maps_on_chan_removal {
-       ($self: expr, $short_to_id: expr, $channel: expr) => {
+       ($self: expr, $short_to_chan_info: expr, $channel: expr) => {
                if let Some(short_id) = $channel.get_short_channel_id() {
-                       $short_to_id.remove(&short_id);
+                       $short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
                        // outbound SCID alias we used for it from the collision-prevention set. While we
@@ -1239,13 +1269,14 @@ macro_rules! update_maps_on_chan_removal {
                        let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               $short_to_id.remove(&$channel.outbound_scid_alias());
+               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+               $short_to_chan_info.remove(&$channel.outbound_scid_alias());
        }
 }
 
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_chan_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+       ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => {
                match $err {
                        ChannelError::Warn(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
@@ -1255,18 +1286,11 @@ macro_rules! convert_chan_err {
                        },
                        ChannelError::Close(msg) => {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $short_to_id, $channel);
+                               update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
                                let shutdown_res = $channel.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
                        },
-                       ChannelError::CloseDelayBroadcast(msg) => {
-                               log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $short_to_id, $channel);
-                               let shutdown_res = $channel.force_shutdown(false);
-                               (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
-                                       shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
-                       }
                }
        }
 }
@@ -1276,7 +1300,7 @@ macro_rules! break_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1291,7 +1315,7 @@ macro_rules! try_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1305,18 +1329,18 @@ macro_rules! remove_channel {
        ($self: expr, $channel_state: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
+                       update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel);
                        channel
                }
        }
 }
 
 macro_rules! handle_monitor_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+       ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
                        ChannelMonitorUpdateErr::PermanentFailure => {
                                log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
-                               update_maps_on_chan_removal!($self, $short_to_id, $chan);
+                               update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
                                // will be responsible for failing backwards once things confirm on-chain.
@@ -1350,13 +1374,13 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
                }
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_funding_locked: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_funding_locked, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
@@ -1369,8 +1393,8 @@ macro_rules! handle_monitor_err {
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_funding_locked: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_funding_locked, Vec::new(), Vec::new(), Vec::new())
+       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
                handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
@@ -1401,20 +1425,20 @@ macro_rules! maybe_break_monitor_err {
        }
 }
 
-macro_rules! send_funding_locked {
-       ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
-               $pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+macro_rules! send_channel_ready {
+       ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+               $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
                        node_id: $channel.get_counterparty_node_id(),
-                       msg: $funding_locked_msg,
+                       msg: $channel_ready_msg,
                });
-               // Note that we may send a funding locked multiple times for a channel if we reconnect, so
+               // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
-               let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
-               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
+               let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+               assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                        "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                if let Some(real_scid) = $channel.get_short_channel_id() {
-                       let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id());
-                       assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(),
+                       let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+                       assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                                "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                }
        }
@@ -1423,7 +1447,7 @@ macro_rules! send_funding_locked {
 macro_rules! handle_chan_restoration_locked {
        ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
         $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
-        $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
+        $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
                let mut htlc_forwards = None;
 
                let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
@@ -1437,24 +1461,24 @@ macro_rules! handle_chan_restoration_locked {
                        }
 
                        if chanmon_update.is_some() {
-                               // On reconnect, we, by definition, only resend a funding_locked if there have been
+                               // On reconnect, we, by definition, only resend a channel_ready if there have been
                                // no commitment updates, so the only channel monitor update which could also be
-                               // associated with a funding_locked would be the funding_created/funding_signed
+                               // associated with a channel_ready would be the funding_created/funding_signed
                                // monitor update. That monitor update failing implies that we won't send
-                               // funding_locked until it's been updated, so we can't have a funding_locked and a
+                               // channel_ready until it's been updated, so we can't have a channel_ready and a
                                // monitor update here (so we don't bother to handle it correctly below).
-                               assert!($funding_locked.is_none());
-                               // A channel monitor update makes no sense without either a funding_locked or a
-                               // commitment update to process after it. Since we can't have a funding_locked, we
+                               assert!($channel_ready.is_none());
+                               // A channel monitor update makes no sense without either a channel_ready or a
+                               // commitment update to process after it. Since we can't have a channel_ready, we
                                // only bother to handle the monitor-update + commitment_update case below.
                                assert!($commitment_update.is_some());
                        }
 
-                       if let Some(msg) = $funding_locked {
-                               // Similar to the above, this implies that we're letting the funding_locked fly
+                       if let Some(msg) = $channel_ready {
+                               // Similar to the above, this implies that we're letting the channel_ready fly
                                // before it should be allowed to.
                                assert!(chanmon_update.is_none());
-                               send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+                               send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg);
                        }
                        if let Some(msg) = $announcement_sigs {
                                $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1568,7 +1592,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                ChannelManager {
                        default_configuration: config.clone(),
                        genesis_hash: genesis_block(params.network).header.block_hash(),
-                       fee_estimator: fee_est,
+                       fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
 
@@ -1576,7 +1600,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
-                               short_to_id: HashMap::new(),
+                               short_to_chan_info: HashMap::new(),
                                forward_htlcs: HashMap::new(),
                                claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
@@ -1584,6 +1608,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
+                       id_to_peer: Mutex::new(HashMap::new()),
 
                        our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
@@ -1592,6 +1617,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        inbound_payment_key: expanded_inbound_key,
                        fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(),
 
+                       probing_cookie_secret: keys_manager.get_secure_random_bytes(),
+
                        last_node_announcement_serial: AtomicUsize::new(0),
                        highest_seen_timestamp: AtomicUsize::new(0),
 
@@ -1608,7 +1635,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Gets the current configuration applied to all new channels,  as
+       /// Gets the current configuration applied to all new channels.
        pub fn get_current_default_configuration(&self) -> &UserConfig {
                &self.default_configuration
        }
@@ -1752,11 +1779,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        confirmations_required: channel.minimum_depth(),
                                        force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
                                        is_outbound: channel.is_outbound(),
-                                       is_funding_locked: channel.is_usable(),
+                                       is_channel_ready: channel.is_usable(),
                                        is_usable: channel.is_live(),
                                        is_public: channel.should_announce(),
                                        inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
-                                       inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat()
+                                       inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
+                                       config: Some(channel.config()),
                                });
                        }
                }
@@ -1775,12 +1803,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.list_channels_with_filter(|_| true)
        }
 
-       /// Gets the list of usable channels, in random order. Useful as an argument to
-       /// get_route to ensure non-announced channels are used.
+       /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
+       /// to ensure non-announced channels are used.
        ///
        /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
        /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
        /// are.
+       ///
+       /// [`find_route`]: crate::routing::router::find_route
        pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
                // Note we use is_live here instead of usable which leads to somewhat confused
                // internal/external nomenclature, but that's ok cause that's probably what the user
@@ -1831,7 +1861,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if let Some(monitor_update) = monitor_update {
                                                if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
                                                        let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
                                                        if is_permanent {
                                                                remove_channel!(self, channel_state, chan_entry);
                                                                break result;
@@ -1860,7 +1890,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
@@ -1916,7 +1947,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
@@ -1929,7 +1962,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        /// `peer_msg` should be set when we receive a message from a peer, but not set when the
        /// user closes, which will be re-exposed as the `ChannelClosed` reason.
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+       -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -1948,7 +1982,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
-               self.finish_force_close_channel(chan.force_shutdown(true));
+               self.finish_force_close_channel(chan.force_shutdown(broadcast));
                if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1959,13 +1993,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(chan.get_counterparty_node_id())
        }
 
-       /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
-       /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
-       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
-       /// channel.
-       pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+       fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+               match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
                                self.channel_state.lock().unwrap().pending_msg_events.push(
                                        events::MessageSendEvent::HandleError {
@@ -1981,11 +2011,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+       /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+       /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+       /// channel.
+       pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       -> Result<(), APIError> {
+               self.force_close_sending_error(channel_id, counterparty_node_id, true)
+       }
+
+       /// Force closes a channel, rejecting new HTLCs on the given channel but skipping the broadcast
+       /// of the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+       /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+       ///
+       /// You can always get the latest local transaction(s) to broadcast from
+       /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+       pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+       -> Result<(), APIError> {
+               self.force_close_sending_error(channel_id, counterparty_node_id, false)
+       }
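
A hedged usage sketch; the `ChannelMonitor` accessor is the one named in the doc comment above:

```rust
// E.g. when recovering from a stale backup, avoid broadcasting a
// possibly-revoked local commitment transaction:
channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id)?;
// Should the holder transaction(s) later be needed, they remain available via
// the channel's ChannelMonitor:
// let txn = channel_monitor.get_latest_holder_commitment_txn(&logger);
```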
+
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
        /// for each to the chain and rejecting new HTLCs on each.
-       pub fn force_close_all_channels(&self) {
+       pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+               for chan in self.list_channels() {
+                       let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+               }
+       }
+
+       /// Force close all channels, rejecting new HTLCs on each but without broadcasting the latest
+       /// local transaction(s).
+       pub fn force_close_all_channels_without_broadcasting_txn(&self) {
                for chan in self.list_channels() {
-                       let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+                       let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
                }
        }
 
@@ -2086,17 +2144,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                })
        }
 
-       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
+       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
                macro_rules! return_malformed_err {
                        ($msg: expr, $err_code: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
                                                failure_code: $err_code,
-                                       })), self.channel_state.lock().unwrap());
+                                       }));
                                }
                        }
                }
@@ -2116,25 +2174,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        //node knows the HMAC matched, so they already know what is there...
                        return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
                }
-
-               let mut channel_state = None;
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       if channel_state.is_none() {
-                                               channel_state = Some(self.channel_state.lock().unwrap());
-                                       }
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
-                                       })), channel_state.unwrap());
+                                       }));
                                }
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2159,22 +2212,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                        },
                        onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
-                               let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
-
-                               let blinding_factor = {
-                                       let mut sha = Sha256::engine();
-                                       sha.input(&new_pubkey.serialize()[..]);
-                                       sha.input(&shared_secret);
-                                       Sha256::from_engine(sha).into_inner()
-                               };
-
-                               let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
-                                       Err(e)
-                               } else { Ok(new_pubkey) };
-
+                               let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
                                let outgoing_packet = msgs::OnionPacket {
                                        version: 0,
-                                       public_key,
+                                       public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
                                        hop_data: new_packet_bytes,
                                        hmac: next_hop_hmac.clone(),
                                };
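
For reference, a sketch of the ECDH re-blinding that `onion_utils::next_hop_packet_pubkey` now encapsulates, matching the inline code deleted above:

```rust
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::sha256::Hash as Sha256;

// blinding_factor = SHA256(serialized_pubkey || shared_secret)
// next_pubkey     = pubkey * blinding_factor  (secp256k1 scalar multiplication)
let blinding_factor = {
    let mut sha = Sha256::engine();
    sha.input(&new_pubkey.serialize()[..]);
    sha.input(&shared_secret);
    Sha256::from_engine(sha).into_inner()
};
let mut next_pubkey = new_pubkey;
next_pubkey.mul_assign(&secp_ctx, &blinding_factor[..]).expect("valid scalar tweak");
```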
@@ -2200,14 +2241,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
 
-               channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
                        // with a short_channel_id of 0. This is important as various things later assume
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-                               let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
                                if let Some((err, code, chan_update)) = loop {
+                                       let mut channel_state = self.channel_state.lock().unwrap();
+                                       let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned();
                                        let forwarding_id_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2218,10 +2259,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
                                                        }
                                                },
-                                               Some(id) => Some(id.clone()),
+                                               Some((_cp_id, chan_id)) => Some(chan_id.clone()),
                                        };
-                                       let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
-                                               let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+                                       let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
+                                               let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
@@ -2247,18 +2288,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
                                                        break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
                                                }
-                                               let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
-                                                       .and_then(|prop_fee| { (prop_fee / 1000000)
-                                                       .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
-                                               if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
-                                                       break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, chan_update_opt));
+                                               if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) {
+                                                       break Some((err, code, chan_update_opt));
+                                               }
+                                               chan_update_opt
+                                       } else {
+                                               if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+                                                       break Some((
+                                                               "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+                                                               0x1000 | 13, None,
+                                                       ));
                                                }
-                                               (chan_update_opt, chan.get_cltv_expiry_delta())
-                                       } else { (None, MIN_CLTV_EXPIRY_DELTA) };
+                                               None
+                                       };
 
-                                       if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + forwardee_cltv_expiry_delta as u64 { // incorrect_cltv_expiry
-                                               break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, chan_update_opt));
-                                       }
                                        let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
                                        // but we want to be robust wrt to counterparty packet sanitization (see
@@ -2305,7 +2348,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               (pending_forward_info, channel_state.unwrap())
+               pending_forward_info
        }
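
The forwarding-fee check that the deleted inline code performed now lives in `Channel::htlc_satisfies_config`. For reference, a sketch of the BOLT 4 fee arithmetic it enforces, matching the removed lines (`base_fee_msat`, `prop_fee_millionths`, and `msg_amount_msat` are placeholders for the channel's config values and the incoming HTLC amount):

```rust
// fee = base + amt_to_forward * proportional_millionths / 1_000_000
// The incoming HTLC must carry at least amt_to_forward + fee.
let fee = amt_to_forward
    .checked_mul(prop_fee_millionths as u64)
    .and_then(|prop_fee| (prop_fee / 1_000_000).checked_add(base_fee_msat as u64));
match fee {
    Some(fee) if msg_amount_msat >= fee && msg_amount_msat - fee >= amt_to_forward => {} // ok
    _ => { /* fail with fee_insufficient (0x1000 | 12) */ }
}
```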
 
        /// Gets the current channel_update for the given channel. This first checks if the channel is
@@ -2352,7 +2395,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
-                       htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
+                       htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
                        fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
@@ -2397,9 +2440,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                        }
 
-                       let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+                       let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
-                               Some(id) => id.clone(),
+                               Some((_cp_id, chan_id)) => chan_id.clone(),
                        };
 
                        macro_rules! insert_outbound_payment {
@@ -2524,12 +2567,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if route.paths.len() < 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
                }
-               if route.paths.len() > 10 {
-                       // This limit is completely arbitrary - there aren't any real fundamental path-count
-                       // limits. After we support retrying individual paths we should likely bump this, but
-                       // for now more than 10 paths likely carries too much one-path failure.
-                       return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
-               }
                if payment_secret.is_none() && route.paths.len() > 1 {
                        return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
                }
@@ -2722,6 +2759,43 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Send a payment that is probing the given route for liquidity. We calculate the
+       /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
+       /// us to easily discern them from real payments.
+       pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+               let payment_id = PaymentId(self.keys_manager.get_secure_random_bytes());
+
+               let payment_hash = self.probing_cookie_from_id(&payment_id);
+
+               if hops.len() < 2 {
+                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                               err: "No need probing a path with less than two hops".to_string()
+                       }))
+               }
+
+               let route = Route { paths: vec![hops], payment_params: None };
+
+               match self.send_payment_internal(&route, payment_hash, &None, None, Some(payment_id), None) {
+                       Ok(payment_id) => Ok((payment_hash, payment_id)),
+                       Err(e) => Err(e)
+               }
+       }
+
+       /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
+       /// payment probe.
+       pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
+               let target_payment_hash = self.probing_cookie_from_id(payment_id);
+               target_payment_hash == *payment_hash
+       }
+
+       /// Returns the 'probing cookie' for the given [`PaymentId`].
+       fn probing_cookie_from_id(&self, payment_id: &PaymentId) -> PaymentHash {
+               let mut preimage = [0u8; 64];
+               preimage[..32].copy_from_slice(&self.probing_cookie_secret);
+               preimage[32..].copy_from_slice(&payment_id.0);
+               PaymentHash(Sha256::hash(&preimage).into_inner())
+       }
+
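
The probing-cookie derivation is fully specified by the three functions above, so a standalone sketch may help; the names here (`probing_cookie`, `is_probe`) are illustrative, not LDK API:

    use bitcoin::hashes::Hash;
    use bitcoin::hashes::sha256::Hash as Sha256;

    /// SHA256 over the 64-byte concatenation of the node's static probing
    /// secret and the random `PaymentId`, as in `probing_cookie_from_id`.
    fn probing_cookie(secret: &[u8; 32], payment_id: &[u8; 32]) -> [u8; 32] {
        let mut preimage = [0u8; 64];
        preimage[..32].copy_from_slice(secret);
        preimage[32..].copy_from_slice(payment_id);
        Sha256::hash(&preimage).into_inner()
    }

    /// Mirrors `payment_is_probe`: a payment is a probe iff its hash equals
    /// the cookie derived from its `PaymentId`.
    fn is_probe(secret: &[u8; 32], payment_hash: &[u8; 32], payment_id: &[u8; 32]) -> bool {
        probing_cookie(secret, payment_id) == *payment_hash
    }

Because the payment hash is derived rather than taken from an invoice, no recipient knows a preimage for it: a probe can never be claimed and always fails back, and where along the path it failed is what carries the liquidity information.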
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>(
@@ -2760,6 +2834,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
+                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                               }
                                e.insert(chan);
                        }
                }
@@ -2778,6 +2856,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spends non-SegWit outputs
        /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
        ///
+       /// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
+       /// across the p2p network.
+       ///
        /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
        /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
        ///
@@ -2793,6 +2874,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// not currently support replacing a funding transaction on an existing channel. Instead,
        /// create a new channel with a conflicting funding transaction.
        ///
+       /// Note that, to keep the incentives of miners aligned with moving the blockchain forward,
+       /// we recommend that the wallet software generating the funding transaction apply
+       /// anti-fee-sniping, as implemented by the Bitcoin Core wallet. See
+       /// <https://bitcoinops.org/en/topics/fee-sniping/> for more details.
+       ///
        /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
        /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
        pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
@@ -2805,6 +2891,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                });
                        }
                }
+               {
+                       let height = self.best_block.read().unwrap().height();
+                       // Transactions are evaluated as final by network mempools for inclusion in the
+                       // next block. However, the modules constituting our Lightning node might not have
+                       // perfectly synchronized views of the blockchain. Thus, if the wallet module is
+                       // ahead of the LDK view, allow one more block of headroom.
+                       // TODO: update this if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 lands and rust-bitcoin is bumped.
+                       if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 {
+                               return Err(APIError::APIMisuseError {
+                                       err: "Funding transaction absolute timelock is non-final".to_owned()
+                               });
+                       }
+               }
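
The condition above compresses three rules into one expression. A standalone restatement (a hypothetical helper, not LDK API), under the standard assumptions that locktime values below 500,000,000 denote block heights and that nLockTime is ignored when every input's sequence is 0xffffffff:

    /// True if the funding transaction would be rejected above: nLockTime is
    /// enforced (some input's sequence is below 0xffffffff), the locktime is
    /// height-based, and it exceeds the next block height plus one block of
    /// headroom for wallet/LDK view skew.
    fn funding_locktime_non_final(lock_time: u32, nlocktime_enforced: bool, best_height: u32) -> bool {
        nlocktime_enforced
            && lock_time < 500_000_000
            && lock_time > best_height + 2
    }

A wallet following the anti-fee-sniping recommendation above would typically set the locktime to the current tip height, which always passes this check.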
                self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
                        let mut output_index = None;
                        let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
@@ -2834,15 +2932,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        #[allow(dead_code)]
        // Messages of up to 64KB should never end up more than half full with addresses, as that would
-       // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
+       // be absurd. We ensure this by checking that at least 100 (our stated public contract on when
        // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
        // message...
        const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
        #[deny(const_err)]
        #[allow(dead_code)]
        // ...by failing to compile if the number of addresses that would be half of a message is
-       // smaller than 500:
-       const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
+       // smaller than 100:
+       const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100;
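
This is the classic const-underflow trick for compile-time assertions on stable Rust: if the invariant is violated, evaluating the constant underflows and compilation fails. A minimal standalone sketch of the pattern (the numbers are illustrative, not the real message-size math):

    const ADDRS_LIMIT: u32 = 100;                 // stated public limit
    const ADDRS_THAT_FIT: u32 = 65_535 / 2 / 64;  // rough capacity of half a 64KB message

    #[deny(const_err)]
    #[allow(dead_code)]
    // Underflows, and therefore fails to compile, if fewer than ADDRS_LIMIT fit.
    const ASSERT_LIMIT_FITS: u32 = ADDRS_THAT_FIT - ADDRS_LIMIT;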
 
        /// Regenerates channel_announcements and generates a signed node_announcement from the given
        /// arguments, providing them in corresponding events via
@@ -2859,13 +2957,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// tying these addresses together and to this node. If you wish to preserve user privacy,
        /// addresses should likely contain only Tor Onion addresses.
        ///
-       /// Panics if `addresses` is absurdly large (more than 500).
+       /// Panics if `addresses` is absurdly large (more than 100).
        ///
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
        pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               if addresses.len() > 500 {
+               if addresses.len() > 100 {
                        panic!("More than half the message size was taken up by public addresses!");
                }
 
@@ -2899,7 +2997,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                });
                                announced_chans = true;
                        } else {
-                               // If the channel is not public or has not yet reached funding_locked, check the
+                               // If the channel is not public or has not yet reached channel_ready, check the
                                // next channel. If we don't yet have any public channels, we'll skip the broadcast
                                // below as peers may not accept it without channels on chain first.
                        }
@@ -2915,6 +3013,73 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Atomically updates the [`ChannelConfig`] for the given channels.
+       ///
+       /// Once the updates are applied, each eligible channel (advertised with a known short channel
+       /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
+       /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
+       /// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
+       ///
+       /// Returns [`ChannelUnavailable`] when a channel is not found. Providing an incorrect
+       /// `counterparty_node_id` for a known channel results in an [`APIMisuseError`].
+       ///
+       /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
+       /// below [`MIN_CLTV_EXPIRY_DELTA`].
+       ///
+       /// If an error is returned, none of the updates should be considered applied.
+       ///
+       /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
+       /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
+       /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
+       /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
+       /// [`ChannelUpdate`]: msgs::ChannelUpdate
+       /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
+       /// [`APIMisuseError`]: APIError::APIMisuseError
+       pub fn update_channel_config(
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+       ) -> Result<(), APIError> {
+               if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
+                       return Err(APIError::APIMisuseError {
+                               err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
+                       });
+               }
+
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
+                       &self.total_consistency_lock, &self.persistence_notifier,
+               );
+               {
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
+                       for channel_id in channel_ids {
+                               let channel_counterparty_node_id = channel_state.by_id.get(channel_id)
+                                       .ok_or(APIError::ChannelUnavailable {
+                                               err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
+                                       })?
+                                       .get_counterparty_node_id();
+                               if channel_counterparty_node_id != *counterparty_node_id {
+                                       return Err(APIError::APIMisuseError {
+                                               err: "counterparty node id mismatch".to_owned(),
+                                       });
+                               }
+                       }
+                       for channel_id in channel_ids {
+                               let channel = channel_state.by_id.get_mut(channel_id).unwrap();
+                               if !channel.update_config(config) {
+                                       continue;
+                               }
+                               if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+                               } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+                                               node_id: channel.get_counterparty_node_id(),
+                                               msg,
+                                       });
+                               }
+                       }
+               }
+               Ok(())
+       }
+
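
As a usage sketch, raising the forwarding fees across a batch of channels with one peer might look like the following, inside a function returning `Result<(), APIError>` (`channel_manager`, `counterparty`, and `chan_ids` are assumed to be in scope; the fee values are arbitrary):

    use lightning::util::config::ChannelConfig;

    let mut config = ChannelConfig::default();
    config.forwarding_fee_base_msat = 2_000;
    config.forwarding_fee_proportional_millionths = 500;
    // The update is atomic: if any channel id is unknown or belongs to a
    // different peer, no channel's config is changed.
    channel_manager.update_channel_config(&counterparty, &chan_ids, &config)?;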
        /// Processes HTLCs which are pending waiting on random forward delay.
        ///
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
@@ -2932,29 +3097,50 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                        for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
                                if short_chan_id != 0 {
-                                       let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
-                                               Some(chan_id) => chan_id.clone(),
+                                       let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
+                                               Some((_cp_id, chan_id)) => chan_id.clone(),
                                                None => {
                                                        for forward_info in pending_forwards.drain(..) {
                                                                match forward_info {
                                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                                routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
+                                                                                       macro_rules! failure_handler {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+                                                                                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+                                                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                                                               short_channel_id: prev_short_channel_id,
+                                                                                                               outpoint: prev_funding_outpoint,
+                                                                                                               htlc_id: prev_htlc_id,
+                                                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                                                               phantom_shared_secret: $phantom_ss,
+                                                                                                       });
+
+                                                                                                       let reason = if $next_hop_unknown {
+                                                                                                               HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+                                                                                                       } else {
+                                                                                                               HTLCDestination::FailedPayment{ payment_hash }
+                                                                                                       };
+
+                                                                                                       failed_forwards.push((htlc_source, payment_hash,
+                                                                                                               HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+                                                                                                               reason
+                                                                                                       ));
+                                                                                                       continue;
+                                                                                               }
+                                                                                       }
                                                                                        macro_rules! fail_forward {
                                                                                                ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
                                                                                                        {
-                                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                                                       outpoint: prev_funding_outpoint,
-                                                                                                                       htlc_id: prev_htlc_id,
-                                                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                                                       phantom_shared_secret: $phantom_ss,
-                                                                                                               });
-                                                                                                               failed_forwards.push((htlc_source, payment_hash,
-                                                                                                                       HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
-                                                                                                               ));
-                                                                                                               continue;
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                                       macro_rules! failed_payment {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+                                                                                                       {
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
                                                                                                        }
                                                                                                }
                                                                                        }
@@ -2962,7 +3148,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
                                                                                                        let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
-                                                                                                       let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                                       let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                        let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -2970,17 +3156,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                                        // `update_fail_malformed_htlc`, meaning here we encrypt the error as
                                                                                                                        // if it came from us (the second-to-last hop) but contains the sha256
                                                                                                                        // of the onion.
-                                                                                                                       fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+                                                                                                                       failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
                                                                                                                },
                                                                                                                Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                                                                                                                       fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+                                                                                                                       failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
                                                                                                                },
                                                                                                        };
                                                                                                        match next_hop {
                                                                                                                onion_utils::Hop::Receive(hop_data) => {
                                                                                                                        match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
                                                                                                                                Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
-                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                        }
                                                                                                                },
                                                                                                                _ => panic!(),
@@ -3031,7 +3217,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        }
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
                                                                                        failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code, data }
+                                                                                               HTLCFailReason::Reason { failure_code, data },
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                },
@@ -3101,7 +3288,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        // ChannelClosed event is generated by handle_error for us.
                                                                                        Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
                                                                                },
-                                                                               ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
                                                                        };
                                                                        handle_errors.push((counterparty_node_id, err));
                                                                        continue;
@@ -3136,7 +3322,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        prev_funding_outpoint } => {
                                                                let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
                                                                        PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
-                                                                               let _legacy_hop_data = payment_data.clone();
+                                                                               let _legacy_hop_data = Some(payment_data.clone());
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
                                                                        },
                                                                        PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
@@ -3161,7 +3347,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                };
 
                                                                macro_rules! fail_htlc {
-                                                                       ($htlc: expr) => {
+                                                                       ($htlc: expr, $payment_hash: expr) => {
                                                                                let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
                                                                                        &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
@@ -3173,7 +3359,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
                                                                                        }), payment_hash,
-                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                                                                       HTLCDestination::FailedPayment { payment_hash: $payment_hash },
                                                                                ));
                                                                        }
                                                                }
@@ -3181,12 +3368,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                macro_rules! check_total_value {
                                                                        ($payment_data: expr, $payment_preimage: expr) => {{
                                                                                let mut payment_received_generated = false;
-                                                                               let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
-                                                                                       .or_insert(Vec::new());
+                                                                               let purpose = || {
+                                                                                       events::PaymentPurpose::InvoicePayment {
+                                                                                               payment_preimage: $payment_preimage,
+                                                                                               payment_secret: $payment_data.payment_secret,
+                                                                                       }
+                                                                               };
+                                                                               let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+                                                                                       .or_insert_with(|| (purpose(), Vec::new()));
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                continue
                                                                                        }
                                                                                }
@@ -3208,16 +3401,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
                                                                                                log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
                                                                                                payment_hash,
-                                                                                               purpose: events::PaymentPurpose::InvoicePayment {
-                                                                                                       payment_preimage: $payment_preimage,
-                                                                                                       payment_secret: $payment_data.payment_secret,
-                                                                                               },
-                                                                                               amt: total_value,
+                                                                                               purpose: purpose(),
+                                                                                               amount_msat: total_value,
                                                                                        });
                                                                                        payment_received_generated = true;
                                                                                } else {
@@ -3245,7 +3435,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                                continue
                                                                                                        }
                                                                                                };
@@ -3254,16 +3444,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        OnionPayload::Spontaneous(preimage) => {
                                                                                                match channel_state.claimable_htlcs.entry(payment_hash) {
                                                                                                        hash_map::Entry::Vacant(e) => {
-                                                                                                               e.insert(vec![claimable_htlc]);
+                                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+                                                                                                               e.insert((purpose.clone(), vec![claimable_htlc]));
                                                                                                                new_events.push(events::Event::PaymentReceived {
                                                                                                                        payment_hash,
-                                                                                                                       amt: amt_to_forward,
-                                                                                                                       purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+                                                                                                                       amount_msat: amt_to_forward,
+                                                                                                                       purpose,
                                                                                                                });
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                                                                                                                log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
                                                                                        }
@@ -3272,17 +3463,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
                                                                                if payment_data.is_none() {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                        continue
                                                                                };
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
                                                                                        let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
@@ -3301,8 +3492,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+               for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
                }
                self.forward_htlcs(&mut phantom_receives);
 
@@ -3346,7 +3537,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_background_events();
        }
 
-       fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+       fn update_channel_fee(&self, short_to_chan_info: &mut HashMap<u64, (PublicKey, [u8; 32])>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
                if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
@@ -3366,7 +3557,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
                        Ok(res) => Ok(res),
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                               let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
                                if drop { retain_channel = false; }
                                Err(res)
                        }
@@ -3374,7 +3565,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let ret_err = match res {
                        Ok(Some((update_fee, commitment_signed, monitor_update))) => {
                                if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+                                       let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
                                        if drop { retain_channel = false; }
                                        res
                                } else {
@@ -3407,16 +3598,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_id = &mut channel_state.short_to_id;
+                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push(err);
@@ -3436,6 +3627,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///  * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
        ///    than a minute, informing the network that they should no longer attempt to route over
        ///    the channel.
+       ///  * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
+       ///    with the current `ChannelConfig`.
        ///
        /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
        /// estimate fetches.
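
Callers are expected to drive this from a coarse timer; LDK's background processor, for instance, ticks it roughly once per minute. A hypothetical standalone driver, assuming `channel_manager` is an `Arc<ChannelManager<...>>`:

    use std::{thread, time::Duration};

    let manager = channel_manager.clone();
    thread::spawn(move || loop {
        thread::sleep(Duration::from_secs(60)); // roughly once a minute
        manager.timer_tick_occurred();
    });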
@@ -3444,7 +3637,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
@@ -3452,10 +3645,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_id = &mut channel_state.short_to_id;
+                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
                                        let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push((err, counterparty_node_id));
@@ -3463,7 +3656,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if !retain_channel { return false; }
 
                                        if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
                                                handle_errors.push((Err(err), chan.get_counterparty_node_id()));
                                                if needs_close { return false; }
                                        }
@@ -3494,10 +3687,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                _ => {},
                                        }
 
+                                       chan.maybe_expire_prev_config();
+
                                        true
                                });
 
-                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        if htlcs.is_empty() {
                                                // This should be unreachable
                                                debug_assert!(false);
@@ -3521,7 +3716,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
-                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+                               let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
                        }
 
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
@@ -3534,14 +3730,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
        /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
        /// along the path (including in our own channel on which we received it).
-       /// Returns false if no payment was found to fail backwards, true if the process of failing the
-       /// HTLC backwards has been started.
-       pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+       ///
+       /// Note that in some cases around unclean shutdown, it is possible the payment may have
+       /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
+       /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment
+       /// may have already been failed automatically by LDK if it was nearing its expiration time.
+       ///
+       /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
+       /// [`ChannelManager::claim_funds`]), you should still monitor for
+       /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
+       /// startup during which time claims that were in-progress at shutdown may be replayed.
+       pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
-               if let Some(mut sources) = removed_source {
+               if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
                                if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
@@ -3549,10 +3753,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                self.best_block.read().unwrap().height()));
                                self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
-                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                               HTLCDestination::FailedPayment { payment_hash: *payment_hash });
                        }
-                       true
-               } else { false }
+               }
        }
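
// A minimal sketch of the new calling convention, assuming `Node` is an
// application-side alias for a concrete `ChannelManager` and
// `expected_amount_for` is a hypothetical invoice lookup: with the `bool`
// return gone, callers fail unconditionally and learn about already-claimed
// payments via `Event::PaymentClaimed` instead.
fn reject_if_unexpected(channel_manager: &Node, payment_hash: PaymentHash, amount_msat: u64) {
    if expected_amount_for(&payment_hash) != Some(amount_msat) {
        // A no-op if the HTLCs were already claimed or timed out.
        channel_manager.fail_htlc_backwards(&payment_hash);
    }
}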
 
        /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
@@ -3585,7 +3789,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
                        let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
                        if desired_err_code == 0x1000 | 20 {
-                               // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                               // No flags for `disabled_flags` are currently defined, so the field is always two zero bytes.
+                               // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
                                0u16.write(&mut enc).expect("Writes cannot fail");
                        }
                        (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
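
// A sketch of the failure data these writes produce, per BOLT 4. The
// standalone helper below is hypothetical (the real code writes through a
// `VecWriter`): `channel_disabled` (0x1000|20) leads with a two-byte,
// currently all-zero flags field, followed for all UPDATE errors by a
// length-prefixed `channel_update`, where both the length and the payload
// include the update's 2-byte message type (258).
fn update_failure_data(desired_err_code: u16, upd_bytes: &[u8]) -> Vec<u8> {
    let mut enc = Vec::with_capacity(upd_bytes.len() + 6);
    if desired_err_code == 0x1000 | 20 {
        enc.extend_from_slice(&0u16.to_be_bytes()); // disabled_flags: none defined
    }
    enc.extend_from_slice(&(upd_bytes.len() as u16 + 2).to_be_bytes()); // type + update len
    enc.extend_from_slice(&258u16.to_be_bytes()); // channel_update message type
    enc.extend_from_slice(upd_bytes);
    enc
}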
@@ -3604,7 +3809,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
        // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
        // be surfaced to the user.
-       fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
+       fn fail_holding_cell_htlcs(
+               &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
+               counterparty_node_id: &PublicKey
+       ) {
                for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
                        match htlc_src {
                                HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
@@ -3616,8 +3824,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
                                                };
                                        let channel_state = self.channel_state.lock().unwrap();
-                                       self.fail_htlc_backwards_internal(channel_state,
-                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+                                       self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
                                },
                                HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
                                        let mut session_priv_bytes = [0; 32];
@@ -3670,7 +3879,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// to fail and take the channel_state lock for each iteration (as we take ownership and may
        /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
        /// still-available channels.
-       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
                //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
                //identify whether we sent it or not based on the (I presume) very different runtime
                //between the branches here. We should make this async and move it into the forward HTLCs
@@ -3710,7 +3919,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        return;
                                }
                                mem::drop(channel_state_lock);
-                               let retry = if let Some(payment_params_data) = payment_params {
+                               let mut retry = if let Some(payment_params_data) = payment_params {
                                        let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
                                        Some(RouteParameters {
                                                payment_params: payment_params_data.clone(),
@@ -3726,22 +3935,43 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 #[cfg(not(test))]
                                                let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
-                                               // TODO: If we decided to blame ourselves (or one of our channels) in
-                                               // process_onion_failure we should close that channel as it implies our
-                                               // next-hop is needlessly blaming us!
-                                               events::Event::PaymentPathFailed {
-                                                       payment_id: Some(payment_id),
-                                                       payment_hash: payment_hash.clone(),
-                                                       rejected_by_dest: !payment_retryable,
-                                                       network_update,
-                                                       all_paths_failed,
-                                                       path: path.clone(),
-                                                       short_channel_id,
-                                                       retry,
-#[cfg(test)]
-                                                       error_code: onion_error_code,
-#[cfg(test)]
-                                                       error_data: onion_error_data
+
+                                               if self.payment_is_probe(payment_hash, &payment_id) {
+                                                       if !payment_retryable {
+                                                               events::Event::ProbeSuccessful {
+                                                                       payment_id,
+                                                                       payment_hash: payment_hash.clone(),
+                                                                       path: path.clone(),
+                                                               }
+                                                       } else {
+                                                               events::Event::ProbeFailed {
+                                                                       payment_id,
+                                                                       payment_hash: payment_hash.clone(),
+                                                                       path: path.clone(),
+                                                                       short_channel_id,
+                                                               }
+                                                       }
+                                               } else {
+                                                       // TODO: If we decided to blame ourselves (or one of our channels) in
+                                                       // process_onion_failure we should close that channel as it implies our
+                                                       // next-hop is needlessly blaming us!
+                                                       if let Some(scid) = short_channel_id {
+                                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+                                                       }
+                                                       events::Event::PaymentPathFailed {
+                                                               payment_id: Some(payment_id),
+                                                               payment_hash: payment_hash.clone(),
+                                                               rejected_by_dest: !payment_retryable,
+                                                               network_update,
+                                                               all_paths_failed,
+                                                               path: path.clone(),
+                                                               short_channel_id,
+                                                               retry,
+                                                               #[cfg(test)]
+                                                               error_code: onion_error_code,
+                                                               #[cfg(test)]
+                                                               error_data: onion_error_data
+                                                       }
                                                }
                                        },
                                        &HTLCFailReason::Reason {
@@ -3752,11 +3982,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        .. } => {
                                                // we get a fail_malformed_htlc from the first hop
                                                // TODO: We'd like to generate a NetworkUpdate for temporary
-                                               // failures here, but that would be insufficient as get_route
+                                               // failures here, but that would be insufficient as find_route
                                                // generally ignores its view of our own channels as we provide them via
                                                // ChannelDetails.
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
+                                               let scid = path.first().unwrap().short_channel_id;
+                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
                                                events::Event::PaymentPathFailed {
                                                        payment_id: Some(payment_id),
                                                        payment_hash: payment_hash.clone(),
@@ -3764,7 +3996,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        network_update: None,
                                                        all_paths_failed,
                                                        path: path.clone(),
-                                                       short_channel_id: Some(path.first().unwrap().short_channel_id),
+                                                       short_channel_id: Some(scid),
                                                        retry,
 #[cfg(test)]
                                                        error_code: Some(*failure_code),
@@ -3777,7 +4009,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                pending_events.push(path_failure);
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
@@ -3809,12 +4041,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                }
                                mem::drop(channel_state_lock);
+                               let mut pending_events = self.pending_events.lock().unwrap();
                                if let Some(time) = forward_event {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
                                        pending_events.push(events::Event::PendingHTLCsForwardable {
                                                time_forwardable: time
                                        });
                                }
+                               pending_events.push(events::Event::HTLCHandlingFailed {
+                                       prev_channel_id: outpoint.to_channel_id(),
+                                       failed_next_destination: destination
+                               });
                        },
                }
        }
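
// A sketch of consuming the new `HTLCHandlingFailed` event pushed above; the
// logging policy is illustrative and variant coverage is not exhaustive.
// Forwarding nodes can now attribute every backwards failure to the channel
// the HTLC arrived on plus the destination that failed.
fn note_htlc_failure(event: &events::Event) {
    if let events::Event::HTLCHandlingFailed { prev_channel_id, failed_next_destination } = event {
        match failed_next_destination {
            HTLCDestination::NextHopChannel { node_id, channel_id } => {
                // Received on prev_channel_id, could not be forwarded over channel_id.
                println!("forward failed: in {:?} out {:?} peer {:?}", prev_channel_id, channel_id, node_id);
            },
            HTLCDestination::FailedPayment { payment_hash } => {
                println!("inbound payment {:?} on {:?} failed backwards", payment_hash, prev_channel_id);
            },
            _ => {}, // other variants elided in this sketch
        }
    }
}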
@@ -3822,26 +4058,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
        /// [`MessageSendEvent`]s needed to claim the payment.
        ///
+       /// Note that calling this method does *not* guarantee that the payment has been claimed. You
+       /// *must* wait for an [`Event::PaymentClaimed`] event which, upon a successful claim, will be
+       /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+       ///
        /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
        /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
        /// event matches your expectation. If you fail to do so and call this method, you may provide
        /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
        ///
-       /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
-       /// pending for processing via [`get_and_clear_pending_msg_events`].
-       ///
        /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
+       /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+       /// [`process_pending_events`]: EventsProvider::process_pending_events
        /// [`create_inbound_payment`]: Self::create_inbound_payment
        /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
        /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-       pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
+       pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let mut channel_state = Some(self.channel_state.lock().unwrap());
                let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
-               if let Some(mut sources) = removed_source {
+               if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
 
                        // If we are claiming an MPP payment, we have to take special care to ensure that each
@@ -3855,12 +4094,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // we got all the HTLCs and then a channel closed while we were waiting for the user to
                        // provide the preimage, so worrying too much about the optimal handling isn't worth
                        // it.
+                       let mut claimable_amt_msat = 0;
+                       let mut expected_amt_msat = None;
                        let mut valid_mpp = true;
                        for htlc in sources.iter() {
-                               if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
+                               if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
                                        valid_mpp = false;
                                        break;
                                }
+                               if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
+                                       log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+                                       debug_assert!(false);
+                                       valid_mpp = false;
+                                       break;
+                               }
+                               expected_amt_msat = Some(htlc.total_msat);
+                               if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
+                                       // We don't currently support MPP for spontaneous payments, so just check
+                                       // that there's one payment here and move on.
+                                       if sources.len() != 1 {
+                                               log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
+                                               debug_assert!(false);
+                                               valid_mpp = false;
+                                               break;
+                                       }
+                               }
+
+                               claimable_amt_msat += htlc.value;
+                       }
+                       if sources.is_empty() || expected_amt_msat.is_none() {
+                               log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+                               return;
+                       }
+                       if claimable_amt_msat != expected_amt_msat.unwrap() {
+                               log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+                                       expected_amt_msat.unwrap(), claimable_amt_msat);
+                               return;
                        }
 
                        let mut errs = Vec::new();
@@ -3873,7 +4142,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        self.best_block.read().unwrap().height()));
                                        self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                                         HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
-                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
+                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+                                                                        HTLCDestination::FailedPayment { payment_hash });
                                } else {
                                        match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
                                                ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
@@ -3896,6 +4166,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                        }
 
+                       if claimed_any_htlcs {
+                               self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+                                       payment_hash,
+                                       purpose: payment_purpose,
+                                       amount_msat: claimable_amt_msat,
+                               });
+                       }
+
                        // Now that we've done the entire above loop in one lock, we can handle any errors
                        // which were generated.
                        channel_state.take();
@@ -3904,16 +4182,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                let res: Result<(), _> = Err(err);
                                let _ = handle_error!(self, res, counterparty_node_id);
                        }
-
-                       claimed_any_htlcs
-               } else { false }
+               }
        }
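
// A minimal event-loop sketch under the new claim semantics, assuming `Node`
// aliases a concrete `ChannelManager` and `preimage_for`/`mark_paid` are
// hypothetical application helpers: `claim_funds` only *starts* a claim, so
// an invoice is marked paid from `Event::PaymentClaimed`, never from the call
// itself.
fn handle_payment_event(channel_manager: &Node, event: events::Event) {
    match event {
        events::Event::PaymentReceived { payment_hash, amount_msat, .. } => {
            match preimage_for(&payment_hash, amount_msat) {
                Some(preimage) => channel_manager.claim_funds(preimage),
                None => channel_manager.fail_htlc_backwards(&payment_hash),
            }
        },
        events::Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
            // The preimage has now been irrevocably committed to the claim.
            mark_paid(&payment_hash, amount_msat);
        },
        _ => {},
    }
}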
 
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
-               let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
-                       Some(chan_id) => chan_id.clone(),
+               let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) {
+                       Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                return ClaimFundsFromHop::PrevHopForceClosed
                        }
@@ -3960,7 +4236,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        payment_preimage, e);
                                        }
                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
                                        if drop {
                                                chan.remove_entry();
                                        }
@@ -4111,7 +4387,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let chan_restoration_res;
-               let (mut pending_failures, finalized_claims) = {
+               let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -4122,10 +4398,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return;
                        }
 
+                       let counterparty_node_id = channel.get().get_counterparty_node_id();
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
-                       let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() {
+                       let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
-                               // funding_locked and the channel is in a usable state. We may re-send a
+                               // channel_ready and the channel is in a usable state. We may re-send a
                                // channel_update later through the announcement_signatures process for public
                                // channels, but there's no reason not to just inform our counterparty of our fees
                                // now.
@@ -4136,16 +4413,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        })
                                } else { None }
                        } else { None };
-                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
+                       chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
-                       (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+                       (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                }
        }
 
@@ -4159,6 +4438,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
        /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
        ///
+       /// Note that this method will return an error and reject the channel if it requires support
+       /// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
+       /// used to accept such channels.
+       ///
        /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
        /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
        pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
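
// A sketch of manual acceptance honoring the zero-conf rule documented above,
// assuming `Node` aliases a concrete `ChannelManager`, `is_trusted` is an
// application policy check, and the `channel_type` field on
// `Event::OpenChannelRequest` is available in this release:
fn on_open_channel_request(channel_manager: &Node, event: events::Event) {
    if let events::Event::OpenChannelRequest {
        temporary_channel_id, counterparty_node_id, channel_type, ..
    } = event {
        let user_channel_id = 42; // echoed back in later ChannelClosed events
        let res = if channel_type.requires_zero_conf() && is_trusted(&counterparty_node_id) {
            channel_manager.accept_inbound_channel_from_trusted_peer_0conf(
                &temporary_channel_id, &counterparty_node_id, user_channel_id)
        } else {
            // For an untrusted zero-conf request this errors and rejects the
            // channel, sending the peer the error message added below.
            channel_manager.accept_inbound_channel(
                &temporary_channel_id, &counterparty_node_id, user_channel_id)
        };
        if let Err(e) = res { eprintln!("failed to accept inbound channel: {:?}", e); }
    }
}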
@@ -4200,7 +4483,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if *counterparty_node_id != channel.get().get_counterparty_node_id() {
                                        return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
                                }
-                               if accept_0conf { channel.get_mut().set_0conf(); }
+                               if accept_0conf {
+                                       channel.get_mut().set_0conf();
+                               } else if channel.get().get_channel_type().requires_zero_conf() {
+                                       let send_msg_err_event = events::MessageSendEvent::HandleError {
+                                               node_id: channel.get().get_counterparty_node_id(),
+                                               action: msgs::ErrorAction::SendErrorMessage{
+                                                       msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+                                               }
+                                       };
+                                       channel_state.pending_msg_events.push(send_msg_err_event);
+                                       let _ = remove_channel!(self, channel_state, channel);
+                                       return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+                               }
+
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: channel.get().get_counterparty_node_id(),
                                        msg: channel.get_mut().accept_inbound_channel(user_channel_id),
@@ -4242,6 +4538,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        },
                        hash_map::Entry::Vacant(entry) => {
                                if !self.default_configuration.manually_accept_inbound_channels {
+                                       if channel.get_channel_type().requires_zero_conf() {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+                                       }
                                        channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                                node_id: counterparty_node_id.clone(),
                                                msg: channel.accept_inbound_channel(0),
@@ -4274,7 +4573,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
-                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.peer_channel_config_limits, &their_features), channel_state, chan);
+                                       try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), channel_state, chan);
                                        (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
@@ -4292,7 +4591,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-               let ((funding_msg, monitor, mut funding_locked), mut chan) = {
+               let ((funding_msg, monitor, mut channel_ready), mut chan) = {
                        let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
@@ -4325,10 +4624,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
                                        // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                                       // accepted payment from yet. We do, however, need to wait to send our funding_locked
+                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
                                        // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, funding_locked.is_some(), Vec::new(), Vec::new(), Vec::new());
-                                       funding_locked = None; // Don't send the funding_locked now
+                                       chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+                                       channel_ready = None; // Don't send the channel_ready now
                                },
                        }
                }
@@ -4339,12 +4638,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
+                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                               match id_to_peer.entry(chan.channel_id()) {
+                                       hash_map::Entry::Occupied(_) => {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                                                       "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
+                                                       funding_msg.channel_id))
+                                       },
+                                       hash_map::Entry::Vacant(i_e) => {
+                                               i_e.insert(chan.get_counterparty_node_id());
+                                       }
+                               }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
-                               if let Some(msg) = funding_locked {
-                                       send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
+                               if let Some(msg) = channel_ready {
+                                       send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg);
                                }
                                e.insert(chan);
                        }
@@ -4362,12 +4672,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (monitor, funding_tx, funding_locked) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
+                                       let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, funding_locked.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
                                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                                        // it. Previously, full_stack_target found an (unreachable) panic when the
@@ -4378,8 +4688,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                }
                                                return res
                                        }
-                                       if let Some(msg) = funding_locked {
-                                               send_funding_locked!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
+                                       if let Some(msg) = channel_ready {
+                                               send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
                                        }
                                        funding_tx
                                },
@@ -4391,7 +4701,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
-       fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
+       fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(msg.channel_id) {
@@ -4399,7 +4709,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().funding_locked(&msg, self.get_our_node_id(),
+                               let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
                                        self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
                                if let Some(announcement_sigs) = announcement_sigs_opt {
                                        log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
@@ -4452,7 +4762,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if let Some(monitor_update) = monitor_update {
                                                if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
                                                        let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                                               handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
                                                        if is_permanent {
                                                                remove_channel!(self, channel_state, chan_entry);
                                                                break result;
@@ -4473,7 +4783,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
@@ -4534,7 +4845,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+               let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
@@ -4753,13 +5065,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
+               self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
                match res {
                        Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
                                short_channel_id, channel_outpoint)) =>
                        {
                                for failure in pending_failures.drain(..) {
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
                                }
                                self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
                                self.finalize_claims(finalized_claim_htlcs);
@@ -4814,8 +5127,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
-                       Some(chan_id) => chan_id.clone(),
+               let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) {
+                       Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                // It's not a local channel
                                return Ok(NotifyOption::SkipPersist)
@@ -4883,7 +5196,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
                                        chan_restoration_res = handle_chan_restoration_locked!(
                                                self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
-                                               responses.mon_update, Vec::new(), None, responses.funding_locked, responses.announcement_sigs);
+                                               responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
                                        if let Some(upd) = channel_update {
                                                channel_state.pending_msg_events.push(upd);
                                        }
@@ -4893,10 +5206,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
-               self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+               self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
 
-               if let Some(funding_locked_msg) = need_lnd_workaround {
-                       self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+               if let Some(channel_ready_msg) = need_lnd_workaround {
+                       self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
                Ok(())
        }
@@ -4906,7 +5219,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
@@ -4915,7 +5228,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                                                }
                                        },
                                        MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@ -4984,19 +5298,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
                                match chan.maybe_free_holding_cell_htlcs(&self.logger) {
                                        Ok((commitment_opt, holding_cell_failed_htlcs)) => {
                                                if !holding_cell_failed_htlcs.is_empty() {
-                                                       failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+                                                       failed_htlcs.push((
+                                                               holding_cell_failed_htlcs,
+                                                               *channel_id,
+                                                               chan.get_counterparty_node_id()
+                                                       ));
                                                }
                                                if let Some((commitment_update, monitor_update)) = commitment_opt {
                                                        if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
                                                                has_monitor_update = true;
-                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
                                                                handle_errors.push((chan.get_counterparty_node_id(), res));
                                                                if close_channel { return false; }
                                                        } else {
@@ -5009,7 +5327,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                true
                                        },
                                        Err(e) => {
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                // ChannelClosed event is generated by handle_error for us
                                                !close_channel
@@ -5019,8 +5337,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
 
                let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
-               for (failures, channel_id) in failed_htlcs.drain(..) {
-                       self.fail_holding_cell_htlcs(failures, channel_id);
+               for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
+                       self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
                }
 
                for (counterparty_node_id, err) in handle_errors.drain(..) {
@@ -5040,7 +5358,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
@@ -5065,13 +5383,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
-                                                       update_maps_on_chan_removal!(self, short_to_id, chan);
+                                                       update_maps_on_chan_removal!(self, short_to_chan_info, chan);
                                                        false
                                                } else { true }
                                        },
                                        Err(e) => {
                                                has_update = true;
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                !close_channel
                                        }
@@ -5266,7 +5584,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                loop {
                        let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
                        // Ensure the generated scid doesn't conflict with a real channel.
-                       match channel_state.short_to_id.entry(scid_candidate) {
+                       match channel_state.short_to_chan_info.entry(scid_candidate) {
                                hash_map::Entry::Occupied(_) => continue,
                                hash_map::Entry::Vacant(_) => return scid_candidate
                        }
@@ -5501,7 +5819,7 @@ where
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
                let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+               let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len());
                for chan in channel_state.by_id.values() {
                        if let Some(funding_txo) = chan.get_funding_txo() {
                                res.push(funding_txo.txid);
@@ -5533,7 +5851,7 @@ where
        /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
        /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
        /// the function.
-       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
                        (&self, height_opt: Option<u32>, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
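For orientation, `do_chain_event` applies one closure to every channel and reacts to its `Result`; after this rename the success arm carries an optional `channel_ready` instead of `funding_locked`. A stripped-down sketch with stand-in types (the real signature threads several more values through):

struct Channel;
struct ChannelReady;
enum ClosureReason { CommitmentBroadcasted }

fn do_chain_event<F>(channels: &mut Vec<Channel>, f: F)
where
    F: Fn(&mut Channel) -> Result<Option<ChannelReady>, ClosureReason>,
{
    channels.retain_mut(|channel| match f(channel) {
        // Queue a channel_ready message for the counterparty, keep the channel.
        Ok(Some(_channel_ready)) => true,
        Ok(None) => true,
        // Funding reorged out or counterparty went on-chain: drop the channel.
        Err(_reason) => false,
    });
}

fn main() {
    let mut channels = vec![Channel, Channel];
    do_chain_event(&mut channels, |_| Ok(None));
    assert_eq!(channels.len(), 2);
}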
@@ -5544,21 +5862,21 @@ where
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let res = f(channel);
-                               if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+                               if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
-                                               }));
+                                               }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
-                                       if let Some(funding_locked) = funding_locked_opt {
-                                               send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
+                                       if let Some(channel_ready) = channel_ready_opt {
+                                               send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
                                                if channel.is_usable() {
-                                                       log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
                                                        if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
                                                                pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
                                                                        node_id: channel.get_counterparty_node_id(),
@@ -5566,7 +5884,7 @@ where
                                                                });
                                                        }
                                                } else {
-                                                       log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+                                                       log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
                                                }
                                        }
                                        if let Some(announcement_sigs) = announcement_sigs {
@@ -5586,21 +5904,22 @@ where
                                                        }
                                                }
                                        }
-                                       if channel.is_our_funding_locked() {
+                                       if channel.is_our_channel_ready() {
                                                if let Some(real_scid) = channel.get_short_channel_id() {
-                                                       // If we sent a 0conf funding_locked, and now have an SCID, we add it
-                                                       // to the short_to_id map here. Note that we check whether we can relay
-                                                       // using the real SCID at relay-time (i.e. enforce option_scid_alias
-                                                       // then), and if the funding tx is ever un-confirmed we force-close the
-                                                       // channel, ensuring short_to_id is always consistent.
-                                                       let scid_insert = short_to_id.insert(real_scid, channel.channel_id());
-                                                       assert!(scid_insert.is_none() || scid_insert.unwrap() == channel.channel_id(),
+                                                       // If we sent a 0conf channel_ready, and now have an SCID, we add it
+                                                       // to the short_to_chan_info map here. Note that we check whether we
+                                                       // can relay using the real SCID at relay-time (i.e.
+                                                       // enforce option_scid_alias then), and if the funding tx is ever
+                                                       // un-confirmed we force-close the channel, ensuring short_to_chan_info
+                                                       // is always consistent.
+                                                       let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                                       assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
                                                                "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
                                                                fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
                                                }
                                        }
                                } else if let Err(reason) = res {
-                                       update_maps_on_chan_removal!(self, short_to_id, channel);
+                                       update_maps_on_chan_removal!(self, short_to_chan_info, channel);
                                        // It looks like our counterparty went on-chain or the funding transaction was
                                        // reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
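The assert in this hunk leans on `HashMap::insert` returning the previous value for the key; in sketch form, with plain integers standing in for the node id and channel id:

use std::collections::HashMap;

fn main() {
    let mut short_to_chan_info: HashMap<u64, (u8, u8)> = HashMap::new();
    let (real_scid, node_id, channel_id) = (42u64, 1u8, 7u8);

    let scid_insert = short_to_chan_info.insert(real_scid, (node_id, channel_id));
    // Either the real SCID was vacant, or a 0conf alias already mapped it to
    // this exact channel; any other previous entry would be a collision.
    assert!(scid_insert.is_none() || scid_insert == Some((node_id, channel_id)));
}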
@@ -5624,7 +5943,7 @@ where
                        });
 
                        if let Some(height) = height_opt {
-                               channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+                               channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
                                        htlcs.retain(|htlc| {
                                                // If height is approaching the number of blocks we think it takes us to get
                                                // our commitment transaction confirmed before the HTLC expires, plus the
@@ -5633,10 +5952,11 @@ where
                                                if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
                                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
                                                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
                                                                failure_code: 0x4000 | 15,
                                                                data: htlc_msat_height_data
-                                                       }));
+                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
                                                        false
                                                } else { true }
                                        });
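The `htlc_msat_height_data` built above is the HTLC amount (big-endian u64) followed by the current height (big-endian u32), paired with failure code `0x4000|15` (`incorrect_or_unknown_payment_details` in BOLT 4). A standalone sketch of that encoding:

// Build the (failure_code, data) pair used when timing out a claimable HTLC.
fn htlc_fail_reason(amount_msat: u64, height: u32) -> (u16, Vec<u8>) {
    let mut data = amount_msat.to_be_bytes().to_vec();
    data.extend_from_slice(&height.to_be_bytes());
    (0x4000 | 15, data)
}

fn main() {
    let (code, data) = htlc_fail_reason(250_000, 700_123);
    assert_eq!(code, 0x4000 | 15);
    assert_eq!(data.len(), 12); // 8 bytes of amount + 4 bytes of height
}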
@@ -5647,8 +5967,8 @@ where
 
                self.handle_init_event_channel_failures(failed_channels);
 
-               for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+               for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
                }
        }
 
@@ -5713,9 +6033,9 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
 
-       fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+       fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
+               let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
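The renamed `handle_channel_ready` keeps the existing wrapper shape: hold a guard for the whole call, delegate to an internal method, and route errors through `handle_error!`. A rough sketch of that guard-then-delegate pattern, with a plain `Mutex` standing in for the persistence-notifier machinery:

use std::sync::Mutex;

struct Manager {
    total_consistency_lock: Mutex<()>,
}

impl Manager {
    fn internal_channel_ready(&self) -> Result<(), String> {
        Ok(())
    }

    fn handle_channel_ready(&self) {
        // Held until the end of the call, so persistence is signalled only
        // once the message has been fully processed.
        let _persistence_guard = self.total_consistency_lock.lock().unwrap();
        let _ = self.internal_channel_ready();
    }
}

fn main() {
    let m = Manager { total_consistency_lock: Mutex::new(()) };
    m.handle_channel_ready();
}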
@@ -5791,14 +6111,14 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
                        channel_state.by_id.retain(|_, chan| {
                                if chan.get_counterparty_node_id() == *counterparty_node_id {
                                        chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                        if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, short_to_id, chan);
+                                               update_maps_on_chan_removal!(self, short_to_chan_info, chan);
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                return false;
                                        } else {
@@ -5813,7 +6133,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
-                                       &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != counterparty_node_id,
+                                       &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
@@ -5890,7 +6210,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        for chan in self.list_channels() {
                                if chan.counterparty.node_id == *counterparty_node_id {
                                        // Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
-                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+                                       let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
                                }
                        }
                } else {
@@ -5912,7 +6232,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        }
 
                        // Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
-                       let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+                       let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
                }
        }
 }
@@ -6013,6 +6333,7 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (4, counterparty, required),
        (5, outbound_scid_alias, option),
        (6, funding_txo, option),
+       (7, config, option),
        (8, short_channel_id, option),
        (10, channel_value_satoshis, required),
        (12, unspendable_punishment_reserve, option),
@@ -6021,12 +6342,12 @@ impl_writeable_tlv_based!(ChannelDetails, {
        (18, outbound_capacity_msat, required),
        // Note that by the time we get past the required read above, outbound_capacity_msat will be
        // filled in, so we can safely unwrap it here.
-       (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
+       (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
        (20, inbound_capacity_msat, required),
        (22, confirmations_required, option),
        (24, force_close_spend_delay, option),
        (26, is_outbound, required),
-       (28, is_funding_locked, required),
+       (28, is_channel_ready, required),
        (30, is_usable, required),
        (32, is_public, required),
        (33, inbound_htlc_minimum_msat, option),
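New `ChannelDetails` fields land on odd TLV types (`7`, the defaulted `19`, `33`), which matters because readers must skip unknown odd types but reject unknown even ones. A toy version of that it's-OK-to-be-odd rule:

// Decide whether an unrecognized TLV type may be skipped during decoding.
fn may_skip_unknown_tlv(type_num: u64) -> bool {
    // Even types are required-to-understand; odd types are optional, which
    // is what lets fields like (7, config, option) be added compatibly.
    type_num % 2 == 1
}

fn main() {
    assert!(may_skip_unknown_tlv(7));
    assert!(!may_skip_unknown_tlv(8));
}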
@@ -6146,13 +6467,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 
 impl Writeable for ClaimableHTLC {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
-               let payment_data = match &self.onion_payload {
-                       OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
-                       _ => None,
-               };
-               let keysend_preimage = match self.onion_payload {
-                       OnionPayload::Invoice { .. } => None,
-                       OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+               let (payment_data, keysend_preimage) = match &self.onion_payload {
+                       OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+                       OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
                };
                write_tlv_fields!(writer, {
                        (0, self.prev_hop, required),
@@ -6193,13 +6510,13 @@ impl Readable for ClaimableHTLC {
                                OnionPayload::Spontaneous(p)
                        },
                        None => {
-                               if payment_data.is_none() {
-                                       return Err(DecodeError::InvalidValue)
-                               }
                                if total_msat.is_none() {
+                                       if payment_data.is_none() {
+                                               return Err(DecodeError::InvalidValue)
+                                       }
                                        total_msat = Some(payment_data.as_ref().unwrap().total_msat);
                                }
-                               OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
+                               OnionPayload::Invoice { _legacy_hop_data: payment_data }
                        },
                };
                Ok(Self {
@@ -6372,13 +6689,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        }
                }
 
+               let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
                (channel_state.claimable_htlcs.len() as u64).write(writer)?;
-               for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+               for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
                        payment_hash.write(writer)?;
                        (previous_hops.len() as u64).write(writer)?;
                        for htlc in previous_hops.iter() {
                                htlc.write(writer)?;
                        }
+                       htlc_purposes.push(purpose);
                }
 
                let per_peer_state = self.per_peer_state.write().unwrap();
@@ -6455,6 +6774,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        (3, pending_outbound_payments, required),
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
+                       (9, htlc_purposes, vec_type),
+                       (11, self.probing_cookie_secret, required),
                });
 
                Ok(())
@@ -6598,7 +6919,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
@@ -6638,7 +6960,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
-                                               short_to_id.insert(short_channel_id, channel.channel_id());
+                                               short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                       }
+                                       if channel.is_funding_initiated() {
+                                               id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
                                        }
                                        by_id.insert(channel.channel_id(), channel);
                                }
@@ -6673,15 +6998,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let claimable_htlcs_count: u64 = Readable::read(reader)?;
-               let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+               let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
                for _ in 0..claimable_htlcs_count {
                        let payment_hash = Readable::read(reader)?;
                        let previous_hops_len: u64 = Readable::read(reader)?;
                        let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
                        for _ in 0..previous_hops_len {
-                               previous_hops.push(Readable::read(reader)?);
+                               previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
                        }
-                       claimable_htlcs.insert(payment_hash, previous_hops);
+                       claimable_htlcs_list.push((payment_hash, previous_hops));
                }
 
                let peer_count: u64 = Readable::read(reader)?;
@@ -6751,16 +7076,24 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut pending_outbound_payments = None;
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
+               let mut probing_cookie_secret: Option<[u8; 32]> = None;
+               let mut claimable_htlc_purposes = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
                        (3, pending_outbound_payments, option),
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
+                       (9, claimable_htlc_purposes, vec_type),
+                       (11, probing_cookie_secret, option),
                });
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
                }
 
+               if probing_cookie_secret.is_none() {
+                       probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+               }
+
                if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
                } else if pending_outbound_payments.is_none() {
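`probing_cookie_secret` follows the same recovery rule as `fake_scid_rand_bytes` just above it: an older serialization won't contain the field, so a fresh random secret is drawn instead of failing the read. In sketch form, with the entropy source as a hypothetical stand-in for the keys manager:

// Use the deserialized secret if present, otherwise draw a fresh one.
fn secret_or_fresh(
    read_secret: Option<[u8; 32]>,
    mut random_bytes: impl FnMut() -> [u8; 32],
) -> [u8; 32] {
    read_secret.unwrap_or_else(|| random_bytes())
}

fn main() {
    let fresh = secret_or_fresh(None, || [0x11; 32]);
    assert_eq!(fresh, [0x11; 32]);
    let kept = secret_or_fresh(Some([0x22; 32]), || [0x11; 32]);
    assert_eq!(kept, [0x22; 32]);
}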
@@ -6778,7 +7111,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        // payments which are still in-flight via their on-chain state.
                        // We only rebuild the pending payments map if we were most recently serialized by
                        // 0.0.102+
-                       for (_, monitor) in args.channel_monitors {
+                       for (_, monitor) in args.channel_monitors.iter() {
                                if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
                                        for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
                                                if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
@@ -6816,6 +7149,49 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
+               let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+               let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+
+               let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
+               if let Some(mut purposes) = claimable_htlc_purposes {
+                       if purposes.len() != claimable_htlcs_list.len() {
+                               return Err(DecodeError::InvalidValue);
+                       }
+                       for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
+                               claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+                       }
+               } else {
+                               // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but did
+                               // include a `_legacy_hop_data` in the `OnionPayload`.
+                       for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
+                               if previous_hops.is_empty() {
+                                       return Err(DecodeError::InvalidValue);
+                               }
+                               let purpose = match &previous_hops[0].onion_payload {
+                                       OnionPayload::Invoice { _legacy_hop_data } => {
+                                               if let Some(hop_data) = _legacy_hop_data {
+                                                       events::PaymentPurpose::InvoicePayment {
+                                                               payment_preimage: match pending_inbound_payments.get(&payment_hash) {
+                                                                       Some(inbound_payment) => inbound_payment.payment_preimage,
+                                                                       None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
+                                                                               Ok(payment_preimage) => payment_preimage,
+                                                                               Err(()) => {
+                                                                                       log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
+                                                                                       return Err(DecodeError::InvalidValue);
+                                                                               }
+                                                                       }
+                                                               },
+                                                               payment_secret: hop_data.payment_secret,
+                                                       }
+                                               } else { return Err(DecodeError::InvalidValue); }
+                                       },
+                                       OnionPayload::Spontaneous(payment_preimage) =>
+                                               events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
+                               };
+                               claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+                       }
+               }
+
                let mut secp_ctx = Secp256k1::new();
                secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
 
@@ -6852,7 +7228,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                return Err(DecodeError::InvalidValue);
                        }
                        if chan.is_usable() {
-                               if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() {
+                               if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
                                        // Note that in rare cases its possible to hit this while reading an older
                                        // channel if we just happened to pick a colliding outbound alias above.
                                        log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
@@ -6861,11 +7237,51 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
-               let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
-               let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+               let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
+
+               for (_, monitor) in args.channel_monitors.iter() {
+                       for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
+                               if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
+                                       log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
+                                       let mut claimable_amt_msat = 0;
+                                       for claimable_htlc in claimable_htlcs {
+                                               claimable_amt_msat += claimable_htlc.value;
+
+                                               // Add a holding-cell claim of the payment to the Channel, which should be
+                                               // applied ~immediately on peer reconnection. Because it won't generate a
+                                               // new commitment transaction we can just provide the payment preimage to
+                                               // the corresponding ChannelMonitor and nothing else.
+                                               //
+                                               // We do so directly instead of via the normal ChannelMonitor update
+                                               // procedure as the ChainMonitor hasn't yet been initialized, implying
+                                               // we're not allowed to call it directly yet. Further, we do the update
+                                               // without incrementing the ChannelMonitor update ID as there isn't any
+                                               // reason to.
+                                               // If we were to generate a new ChannelMonitor update ID here and then
+                                               // crash before the user finishes block connect we'd end up force-closing
+                                               // this channel as well. On the flip side, there's no harm in restarting
+                                               // without the new monitor persisted - we'll end up right back here on
+                                               // restart.
+                                               let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+                                               if let Some(channel) = by_id.get_mut(&previous_channel_id) {
+                                                       channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+                                               }
+                                               if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
+                                               }
+                                       }
+                                       pending_events_read.push(events::Event::PaymentClaimed {
+                                               payment_hash,
+                                               purpose: payment_purpose,
+                                               amount_msat: claimable_amt_msat,
+                                       });
+                               }
+                       }
+               }
+
                let channel_manager = ChannelManager {
                        genesis_hash,
-                       fee_estimator: args.fee_estimator,
+                       fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
 
@@ -6873,7 +7289,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
-                               short_to_id,
+                               short_to_chan_info,
                                forward_htlcs,
                                claimable_htlcs,
                                pending_msg_events: Vec::new(),
@@ -6883,8 +7299,11 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
+                       id_to_peer: Mutex::new(id_to_peer),
                        fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
+                       probing_cookie_secret: probing_cookie_secret.unwrap(),
+
                        our_network_key,
                        our_network_pubkey,
                        secp_ctx,
@@ -6905,7 +7324,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -6930,7 +7351,7 @@ mod tests {
        use ln::msgs::ChannelMessageHandler;
        use routing::router::{PaymentParameters, RouteParameters, find_route};
        use util::errors::APIError;
-       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use util::test_utils;
        use chain::keysinterface::KeysInterface;
 
@@ -7093,7 +7514,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7116,8 +7537,10 @@ mod tests {
                // claim_funds_along_route because the ordering of the messages causes the second half of the
                // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
                // lightning messages manually.
-               assert!(nodes[1].node.claim_funds(payment_preimage));
+               nodes[1].node.claim_funds(payment_preimage);
+               expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
                check_added_monitors!(nodes[1], 2);
+
                let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
@@ -7199,8 +7622,8 @@ mod tests {
                        final_cltv_expiry_delta: TEST_FINAL_CLTV,
                };
                let route = find_route(
-                       &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+                       None, nodes[0].logger, &scorer, &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
                check_added_monitors!(nodes[0], 1);
@@ -7211,8 +7634,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               // We have to forward pending HTLCs twice - the first pass attempts the forward (and
+               // fails), the second processes the resulting failure and fails the HTLC backward.
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7230,8 +7655,8 @@ mod tests {
                // To start (2), send a keysend payment but don't claim it.
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
-                       &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
-                       nodes[0].logger, &scorer, &random_seed_bytes
+                       &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+                       None, nodes[0].logger, &scorer, &random_seed_bytes
                ).unwrap();
                let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
                check_added_monitors!(nodes[0], 1);
@@ -7253,7 +7678,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7294,7 +7719,7 @@ mod tests {
                let scorer = test_utils::TestScorer::with_penalty(0);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
-                       &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
                        nodes[0].logger, &scorer, &random_seed_bytes
                ).unwrap();
 
@@ -7338,7 +7763,7 @@ mod tests {
                let scorer = test_utils::TestScorer::with_penalty(0);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
-                       &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+                       &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
                        nodes[0].logger, &scorer, &random_seed_bytes
                ).unwrap();
 
@@ -7417,6 +7842,119 @@ mod tests {
                // Check that using the original payment hash succeeds.
                assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
        }
+
+       #[test]
+       fn test_id_to_peer_coverage() {
+               // Test that the `ChannelManager::id_to_peer` map contains channels which have been assigned
+               // a `channel_id` (i.e. have had the funding tx created), and that they are removed once
+               // the channel is successfully closed.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+               let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel);
+               let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel);
+
+               let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+               let channel_id = &tx.txid().into_inner();
+               {
+                       // Ensure that the `id_to_peer` map is empty until either party has received the
+                       // funding transaction and has the real `channel_id`.
+                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
+
+               nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+               {
+                       // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
+                       // it has the funding transaction.
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
+
+               let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+               {
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
+                       // it has the funding transaction.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+               check_added_monitors!(nodes[1], 1);
+               let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+               check_added_monitors!(nodes[0], 1);
+               let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+               let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+               update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
+
+               nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+               let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &nodes_1_shutdown);
+
+               let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
+               {
+                       // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+                       // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
+                       // fee for the closing transaction has been negotiated, and each party has the other
+                       // party's signature for the fee-negotiated closing transaction).
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
+                       // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
+                       // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
+                       // kept in `nodes[1]`'s `id_to_peer` map.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+               {
+                       // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
+                       // therefore has all it needs to fully close the channel (both signatures for the
+                       // closing transaction).
+                       // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+                       // fully closed by `nodes[0]`.
+                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+
+                       // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+                       // doesn't have `nodes[0]`'s signature for the closing transaction yet.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+
+               let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
+               {
+                       // Assert that the channel has now been removed from both parties' `id_to_peer` maps once
+                       // they both have everything required to fully close the channel.
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
+               let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+
+               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
@@ -7428,7 +7966,7 @@ pub mod bench {
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs::{ChannelMessageHandler, Init};
-       use routing::network_graph::NetworkGraph;
+       use routing::gossip::NetworkGraph;
        use routing::router::{PaymentParameters, get_route};
        use util::test_utils;
        use util::config::UserConfig;
@@ -7468,7 +8006,7 @@ pub mod bench {
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 
                let mut config: UserConfig = Default::default();
-               config.own_channel_config.minimum_depth = 1;
+               config.channel_handshake_config.minimum_depth = 1;
 
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
                let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
@@ -7516,12 +8054,12 @@ pub mod bench {
                Listen::block_connected(&node_a, &block, 1);
                Listen::block_connected(&node_b, &block, 1);
 
-               node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+               node_a.handle_channel_ready(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
                let msg_events = node_a.get_and_clear_pending_msg_events();
                assert_eq!(msg_events.len(), 2);
                match msg_events[0] {
-                       MessageSendEvent::SendFundingLocked { ref msg, .. } => {
-                               node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+                       MessageSendEvent::SendChannelReady { ref msg, .. } => {
+                               node_b.handle_channel_ready(&node_a.get_our_node_id(), msg);
                                get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
                        },
                        _ => panic!(),
@@ -7531,7 +8069,7 @@ pub mod bench {
                        _ => panic!(),
                }
 
-               let dummy_graph = NetworkGraph::new(genesis_hash);
+               let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a);
 
                let mut payment_count: u64 = 0;
                macro_rules! send_payment {
@@ -7563,7 +8101,8 @@ pub mod bench {
 
                                expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
                                expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
-                               assert!($node_b.claim_funds(payment_preimage));
+                               $node_b.claim_funds(payment_preimage);
+                               expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
 
                                match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
                                        MessageSendEvent::UpdateHTLCs { node_id, updates } => {