Merge pull request #1106 from TheBlueMatt/2021-10-no-perm-err-broadcast
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c87316acc7ccddcafd37219aadeedcba2e3aa216..f234ad9f91f555075804d87db115d98193f09619 100644
@@ -32,28 +32,30 @@ use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
-use bitcoin::secp256k1;
+use bitcoin::{LockTime, secp256k1, Sequence};
 
 use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
+use chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
 use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
-use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+#[cfg(any(feature = "_test_utils", test))]
+use ln::features::InvoiceFeatures;
 use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
 use ln::msgs;
-use ln::msgs::NetAddress;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use ln::wire::Encode;
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
 use util::config::{UserConfig, ChannelConfig};
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::{byte_utils, events};
+use util::wakers::{Future, Notifier};
 use util::scid_utils::fake_scid;
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use util::logger::{Level, Logger};
@@ -64,15 +66,11 @@ use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use io::Read;
-use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
 
-#[cfg(any(test, feature = "std"))]
-use std::time::Instant;
-use util::crypto::sign;
-
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
 // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
@@ -281,7 +279,7 @@ enum ClaimFundsFromHop {
        DuplicateClaim,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -402,17 +400,7 @@ pub(super) struct ChannelHolder<Signer: Sign> {
        /// Outbound SCID aliases are added here once the channel is available for normal use, with
        /// SCIDs being added once the funding transaction is confirmed at the channel's required
        /// confirmation depth.
-       pub(super) short_to_id: HashMap<u64, (PublicKey, [u8; 32])>,
-       /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
-       ///
-       /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
-       /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
-       /// and via the classic SCID.
-       ///
-       /// Note that while this is held in the same mutex as the channels themselves, no consistency
-       /// guarantees are made about the existence of a channel with the short id here, nor the short
-       /// ids in the PendingHTLCInfo!
-       pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
+       pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
        /// Map from payment hash to the payment data and any HTLCs which are to us and can be
        /// failed/claimed by the user.
        ///
@@ -679,6 +667,38 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// essentially you should default to using a SimpleRefChannelManager, and use a
 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
 /// you're using lightning-net-tokio.
+//
+// Lock order:
+// The tree structure below illustrates the lock order requirements for the different locks of the
+// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
+// and should then be taken in root-to-leaf order, i.e. a lock higher up the tree before any below it.
+// Note that locks on different branches shall not be taken at the same time, as doing so will
+// create a new lock order for those specific locks in the order they were taken.
+//
+// Lock order tree:
+//
+// `total_consistency_lock`
+//  |
+//  |__`forward_htlcs`
+//  |
+//  |__`channel_state`
+//  |   |
+//  |   |__`id_to_peer`
+//  |   |
+//  |   |__`per_peer_state`
+//  |       |
+//  |       |__`outbound_scid_aliases`
+//  |       |
+//  |       |__`pending_inbound_payments`
+//  |           |
+//  |           |__`pending_outbound_payments`
+//  |               |
+//  |               |__`best_block`
+//  |               |
+//  |               |__`pending_events`
+//  |                   |
+//  |                   |__`pending_background_events`
+//
 pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
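The lock-order tree above is easiest to internalize as code. A minimal sketch of the discipline it describes, using plain `std` locks as stand-ins for the actual `ChannelManager` fields (an illustration, not the crate's API):

```rust
use std::sync::{Mutex, RwLock};

// Stand-ins for a few ChannelManager fields, arranged as in the tree above.
struct Manager {
    total_consistency_lock: RwLock<()>, // root
    forward_htlcs: Mutex<()>,           // child of the root
    channel_state: Mutex<()>,           // child of the root, sibling branch
    per_peer_state: RwLock<()>,         // below channel_state
}

impl Manager {
    fn respects_lock_order(&self) {
        // OK: all three locks lie on one root-to-leaf path, taken root-first.
        let _consistency = self.total_consistency_lock.read().unwrap();
        let _chan_state = self.channel_state.lock().unwrap();
        let _peers = self.per_peer_state.read().unwrap();
    }

    fn violates_lock_order(&self) {
        let _consistency = self.total_consistency_lock.read().unwrap();
        // NOT OK: `forward_htlcs` and `channel_state` sit on sibling branches;
        // holding both at once silently defines a new, possibly conflicting order.
        let _forwards = self.forward_htlcs.lock().unwrap();
        let _chan_state = self.channel_state.lock().unwrap();
    }
}
```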
@@ -688,16 +708,18 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 {
        default_configuration: UserConfig,
        genesis_hash: BlockHash,
-       fee_estimator: F,
+       fee_estimator: LowerBoundedFeeEstimator<F>,
        chain_monitor: M,
        tx_broadcaster: T,
 
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(test)]
        pub(super) best_block: RwLock<BestBlock>,
        #[cfg(not(test))]
        best_block: RwLock<BestBlock>,
        secp_ctx: Secp256k1<secp256k1::All>,
 
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        #[cfg(any(test, feature = "_test_utils"))]
        pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
        #[cfg(not(any(test, feature = "_test_utils")))]
@@ -707,7 +729,8 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
        /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
        /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
-       /// Locked *after* channel_state.
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
 
        /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
@@ -721,15 +744,53 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        ///
        /// See `PendingOutboundPayment` documentation for more info.
        ///
-       /// Locked *after* channel_state.
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
 
+       /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
+       ///
+       /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+       /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+       /// and via the classic SCID.
+       ///
+       /// Note that no consistency guarantees are made about the existence of a channel with the
+       /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       #[cfg(test)]
+       pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+       #[cfg(not(test))]
+       forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+
        /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
        /// and some closed channels which reached a usable state prior to being closed. This is used
        /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
        /// active channel list on load.
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        outbound_scid_aliases: Mutex<HashSet<u64>>,
 
+       /// `channel_id` -> `counterparty_node_id`.
+       ///
+       /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
+       /// multiple channels with the same `temporary_channel_id` to different peers can exist,
+       /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+       ///
+       /// Note that this map should only be used for `MonitorEvent` handling, to be able to access
+       /// the corresponding channel for the event, as we only have access to the `channel_id` during
+       /// the handling of the events.
+       ///
+       /// TODO:
+       /// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
+       /// to make `counterparty_node_id`s a required field in `ChannelMonitor`s, which unfortunately
+       /// would break backwards compatibility.
+       /// We should eventually add `counterparty_node_id`s to `MonitorEvent`s and rely on them,
+       /// making this map redundant, as only the `ChannelManager::per_peer_state` would be required
+       /// to access the channel with the `counterparty_node_id`.
+       ///
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
+       id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+
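Because `MonitorEvent`s only carry a `channel_id`, handling one means first mapping that id back to a peer. A hedged sketch of how such a map is maintained and queried (stand-in types; as noted above, the real map is only ever keyed by funded-channel ids, never `temporary_channel_id`s):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

type ChannelId = [u8; 32];
type NodeId = [u8; 33]; // stand-in for secp256k1::PublicKey

struct IdToPeer {
    id_to_peer: Mutex<HashMap<ChannelId, NodeId>>,
}

impl IdToPeer {
    // Insert once the channel has its funding-derived (non-temporary) channel_id.
    fn on_funding(&self, channel_id: ChannelId, counterparty: NodeId) {
        let prev = self.id_to_peer.lock().unwrap().insert(channel_id, counterparty);
        // Funding txids are unique, so a pre-existing entry indicates a logic error.
        assert!(prev.is_none());
    }

    // MonitorEvent handling: recover the peer for a channel_id, if we still track it.
    fn peer_for_monitor_event(&self, channel_id: &ChannelId) -> Option<NodeId> {
        self.id_to_peer.lock().unwrap().get(channel_id).copied()
    }
}
```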
        our_network_key: SecretKey,
        our_network_pubkey: PublicKey,
 
@@ -742,9 +803,10 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// [fake scids]: crate::util::scid_utils::fake_scid
        fake_scid_rand_bytes: [u8; 32],
 
-       /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
-       /// value increases strictly since we don't assume access to a time source.
-       last_node_announcement_serial: AtomicUsize,
+       /// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
+       /// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
+       /// keeping additional state.
+       probing_cookie_secret: [u8; 32],
 
        /// The highest block timestamp we've seen, which is usually a good guess at the current time.
        /// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
@@ -758,20 +820,22 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
        /// new channel.
        ///
-       /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_events: Mutex<Vec<events::Event>>,
+       /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_background_events: Mutex<Vec<BackgroundEvent>>,
        /// Used when we have to take a BIG lock to make sure everything is self-consistent.
        /// Essentially just when we're serializing ourselves out.
        /// Taken first everywhere where we are making changes before any other locks.
        /// When acquiring this lock in read mode, rather than acquiring it directly, call
        /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
-       /// PersistenceNotifier the lock contains sends out a notification when the lock is released.
+       /// contained Notifier sends out a notification when the lock is released.
        total_consistency_lock: RwLock<()>,
 
-       persistence_notifier: PersistenceNotifier,
+       persistence_notifier: Notifier,
 
        keys_manager: K,
 
@@ -811,18 +875,18 @@ enum NotifyOption {
 /// notify or not based on whether relevant changes have been made, providing a closure to
 /// `optionally_notify` which returns a `NotifyOption`.
 struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
-       persistence_notifier: &'a PersistenceNotifier,
+       persistence_notifier: &'a Notifier,
        should_persist: F,
        // We hold onto this result so the lock doesn't get released immediately.
        _read_guard: RwLockReadGuard<'a, ()>,
 }
 
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
-       fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+       fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
                PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
        }
 
-       fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+       fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
                let read_guard = lock.read().unwrap();
 
                PersistenceNotifierGuard {
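`PersistenceNotifierGuard` ties "hold the read side of `total_consistency_lock`" to "wake persistence waiters on release". A minimal sketch of the same RAII notify-on-drop pattern, with a `Condvar`-backed stand-in for the crate's `Notifier` (an assumed shape, for illustration):

```rust
use std::sync::{Condvar, Mutex, RwLock, RwLockReadGuard};

// Assumed stand-in for util::wakers::Notifier.
struct Notifier {
    notified: Mutex<bool>,
    cv: Condvar,
}

impl Notifier {
    fn notify(&self) {
        *self.notified.lock().unwrap() = true;
        self.cv.notify_all(); // wake anyone waiting to persist
    }
}

struct NotifyGuard<'a> {
    notifier: &'a Notifier,
    // Held purely so total_consistency_lock stays read-locked until drop.
    _read_guard: RwLockReadGuard<'a, ()>,
}

impl<'a> NotifyGuard<'a> {
    fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> Self {
        NotifyGuard { notifier, _read_guard: lock.read().unwrap() }
    }
}

impl<'a> Drop for NotifyGuard<'a> {
    fn drop(&mut self) {
        // Signal first; the read guard is released right after this body runs.
        self.notifier.notify();
    }
}
```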
@@ -1148,13 +1212,13 @@ pub enum PaymentSendFailure {
        /// in over-/re-payment.
        ///
        /// The results here are ordered the same as the paths in the route object which was passed to
-       /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
-       /// retried (though there is currently no API with which to do so).
+       /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
+       /// safely retried via [`ChannelManager::retry_payment`].
        ///
-       /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
-       /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
-       /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
-       /// with the latest update_id.
+       /// Any entries which contain `Err(APIError::MonitorUpdateInProgress)` or `Ok(())` MUST NOT be
+       /// retried as they will result in over-/re-payment. These HTLCs were all either successfully
+       /// sent (in the case of `Ok(())`) or will be sent once a [`MonitorEvent::Completed`] is
+       /// provided for the next-hop channel with the latest update_id.
        PartialFailure {
                /// The errors themselves, in the same order as the route hops.
                results: Vec<Result<(), APIError>>,
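The retry rule above is mechanical: per-path `Err`s other than `MonitorUpdateInProgress` may be retried, while `Ok(())` and `MonitorUpdateInProgress` entries must not be, as the HTLC is (or will be) in flight. A sketch of triaging the `results` vector under that rule, with a simplified stand-in for `APIError`:

```rust
// Stand-in for the relevant APIError variants.
enum ApiError {
    MonitorUpdateInProgress,
    ChannelUnavailable,
    RouteError,
}

/// Returns the indices of paths that are safe to retry under the PartialFailure rule.
fn retriable_paths(results: &[Result<(), ApiError>]) -> Vec<usize> {
    results.iter().enumerate().filter_map(|(idx, res)| match res {
        // Sent (or queued behind a monitor update): retrying would double-pay.
        Ok(()) | Err(ApiError::MonitorUpdateInProgress) => None,
        // Definitively failed on this path: safe to hand back for a retry.
        Err(_) => Some(idx),
    }).collect()
}

fn main() {
    let results = vec![
        Ok(()),
        Err(ApiError::MonitorUpdateInProgress),
        Err(ApiError::ChannelUnavailable),
        Err(ApiError::RouteError),
    ];
    assert_eq!(retriable_paths(&results), vec![2, 3]);
}
```

The still-retriable paths can then be fed to `ChannelManager::retry_payment`, per the docs above.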
@@ -1232,9 +1296,9 @@ macro_rules! handle_error {
 }
 
 macro_rules! update_maps_on_chan_removal {
-       ($self: expr, $short_to_id: expr, $channel: expr) => {
+       ($self: expr, $short_to_chan_info: expr, $channel: expr) => {
                if let Some(short_id) = $channel.get_short_channel_id() {
-                       $short_to_id.remove(&short_id);
+                       $short_to_chan_info.remove(&short_id);
                } else {
                        // If the channel was never confirmed on-chain prior to its closure, remove the
                        // outbound SCID alias we used for it from the collision-prevention set. While we
@@ -1245,13 +1309,14 @@ macro_rules! update_maps_on_chan_removal {
                        let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
                        debug_assert!(alias_removed);
                }
-               $short_to_id.remove(&$channel.outbound_scid_alias());
+               $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+               $short_to_chan_info.remove(&$channel.outbound_scid_alias());
        }
 }
 
 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
 macro_rules! convert_chan_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+       ($self: ident, $err: expr, $short_to_chan_info: expr, $channel: expr, $channel_id: expr) => {
                match $err {
                        ChannelError::Warn(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
@@ -1261,7 +1326,7 @@ macro_rules! convert_chan_err {
                        },
                        ChannelError::Close(msg) => {
                                log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
-                               update_maps_on_chan_removal!($self, $short_to_id, $channel);
+                               update_maps_on_chan_removal!($self, $short_to_chan_info, $channel);
                                let shutdown_res = $channel.force_shutdown(true);
                                (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
                                        shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
@@ -1275,7 +1340,7 @@ macro_rules! break_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1290,7 +1355,7 @@ macro_rules! try_chan_entry {
                match $res {
                        Ok(res) => res,
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+                               let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_chan_info, $entry.get_mut(), $entry.key());
                                if drop {
                                        $entry.remove_entry();
                                }
@@ -1304,18 +1369,18 @@ macro_rules! remove_channel {
        ($self: expr, $channel_state: expr, $entry: expr) => {
                {
                        let channel = $entry.remove_entry().1;
-                       update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
+                       update_maps_on_chan_removal!($self, $channel_state.short_to_chan_info, channel);
                        channel
                }
        }
 }
 
-macro_rules! handle_monitor_err {
-       ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+macro_rules! handle_monitor_update_res {
+       ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
                match $err {
-                       ChannelMonitorUpdateErr::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
-                               update_maps_on_chan_removal!($self, $short_to_id, $chan);
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
+                               update_maps_on_chan_removal!($self, $short_to_chan_info, $chan);
                                // TODO: $failed_fails is dropped here, which will cause other channels to hit the
                                // chain in a confused state! We need to move them into the ChannelMonitor which
                                // will be responsible for failing backwards once things confirm on-chain.
@@ -1326,11 +1391,11 @@ macro_rules! handle_monitor_err {
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
                                let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-                                               $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
+                                               $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
                                (res, true)
                        },
-                       ChannelMonitorUpdateErr::TemporaryFailure => {
-                               log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
                                                log_bytes!($chan_id[..]),
                                                if $resend_commitment && $resend_raa {
                                                                match $action_type {
@@ -1349,13 +1414,16 @@ macro_rules! handle_monitor_err {
                                if !$resend_raa {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
-                               $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+                               $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
                                (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
                        },
+                       ChannelMonitorUpdateStatus::Completed => {
+                               (Ok(()), false)
+                       },
                }
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-               let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+               let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
                if drop {
                        $entry.remove_entry();
                }
@@ -1363,56 +1431,35 @@ macro_rules! handle_monitor_err {
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
                debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        } };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
        };
        ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
+               handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
        };
 }
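Renamed from `handle_monitor_err!`, the macro now dispatches on three outcomes rather than two failure variants. Reduced to its decision table (stand-in types; the real macro also threads resend flags and failed-HTLC lists), and note the PR's behavioral change: on `PermanentFailure` the channel is now shut down with `force_shutdown(false)`, i.e. without broadcasting the latest local commitment transaction:

```rust
// Stand-in for chain::ChannelMonitorUpdateStatus.
enum MonitorUpdateStatus {
    Completed,
    InProgress,
    PermanentFailure,
}

enum ChannelAction {
    /// The update persisted durably; proceed as normal.
    Proceed,
    /// Persistence is pending; pause the channel and replay once a
    /// MonitorEvent::Completed arrives with the latest update_id.
    PauseUntilCompleted,
    /// Persistence failed for good; close the channel, but (per this PR)
    /// without broadcasting our latest commitment transaction.
    ForceCloseWithoutBroadcast,
}

fn dispatch(status: MonitorUpdateStatus) -> ChannelAction {
    match status {
        MonitorUpdateStatus::Completed => ChannelAction::Proceed,
        MonitorUpdateStatus::InProgress => ChannelAction::PauseUntilCompleted,
        MonitorUpdateStatus::PermanentFailure => ChannelAction::ForceCloseWithoutBroadcast,
    }
}
```

The `(update_err, macro_result)` pairing in `send_payment` below makes the same table explicit, with `unreachable!()` guarding the impossible combinations.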
 
-macro_rules! return_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
-       };
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-               return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-       }
-}
-
-// Does not break in case of TemporaryFailure!
-macro_rules! maybe_break_monitor_err {
-       ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-               match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
-                       (e, ChannelMonitorUpdateErr::PermanentFailure) => {
-                               break e;
-                       },
-                       (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
-               }
-       }
-}
-
 macro_rules! send_channel_ready {
-       ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
+       ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {
                $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
                        node_id: $channel.get_counterparty_node_id(),
                        msg: $channel_ready_msg,
                });
                // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
                // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
-               let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+               let outbound_alias_insert = $short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
                assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                        "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                if let Some(real_scid) = $channel.get_short_channel_id() {
-                       let scid_insert = $short_to_id.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+                       let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
                        assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
                                "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
                }
@@ -1453,7 +1500,7 @@ macro_rules! handle_chan_restoration_locked {
                                // Similar to the above, this implies that we're letting the channel_ready fly
                                // before it should be allowed to.
                                assert!(chanmon_update.is_none());
-                               send_channel_ready!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+                               send_channel_ready!($channel_state.short_to_chan_info, $channel_state.pending_msg_events, $channel_entry.get(), msg);
                        }
                        if let Some(msg) = $announcement_sigs {
                                $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
@@ -1474,15 +1521,18 @@ macro_rules! handle_chan_restoration_locked {
                                // only case where we can get a new ChannelMonitorUpdate would be if we also
                                // have some commitment updates to send as well.
                                assert!($commitment_update.is_some());
-                               if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                       // channel_reestablish doesn't guarantee the order it returns is sensical
-                                       // for the messages it returns, but if we're setting what messages to
-                                       // re-transmit on monitor update success, we need to make sure it is sane.
-                                       let mut order = $order;
-                                       if $raa.is_none() {
-                                               order = RAACommitmentOrder::CommitmentFirst;
+                               match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {},
+                                       e => {
+                                               // channel_reestablish doesn't guarantee the order it returns is sensical
+                                               // for the messages it returns, but if we're setting what messages to
+                                               // re-transmit on monitor update success, we need to make sure it is sane.
+                                               let mut order = $order;
+                                               if $raa.is_none() {
+                                                       order = RAACommitmentOrder::CommitmentFirst;
+                                               }
+                                               break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                        }
-                                       break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
                                }
                        }
 
@@ -1567,7 +1617,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                ChannelManager {
                        default_configuration: config.clone(),
                        genesis_hash: genesis_block(params.network).header.block_hash(),
-                       fee_estimator: fee_est,
+                       fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
                        chain_monitor,
                        tx_broadcaster,
 
@@ -1575,14 +1625,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
-                               short_to_id: HashMap::new(),
-                               forward_htlcs: HashMap::new(),
+                               short_to_chan_info: HashMap::new(),
                                claimable_htlcs: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        outbound_scid_aliases: Mutex::new(HashSet::new()),
                        pending_inbound_payments: Mutex::new(HashMap::new()),
                        pending_outbound_payments: Mutex::new(HashMap::new()),
+                       forward_htlcs: Mutex::new(HashMap::new()),
+                       id_to_peer: Mutex::new(HashMap::new()),
 
                        our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
                        our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
@@ -1591,7 +1642,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        inbound_payment_key: expanded_inbound_key,
                        fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(),
 
-                       last_node_announcement_serial: AtomicUsize::new(0),
+                       probing_cookie_secret: keys_manager.get_secure_random_bytes(),
+
                        highest_seen_timestamp: AtomicUsize::new(0),
 
                        per_peer_state: RwLock::new(HashMap::new()),
@@ -1599,7 +1651,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        pending_events: Mutex::new(Vec::new()),
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
-                       persistence_notifier: PersistenceNotifier::new(),
+                       persistence_notifier: Notifier::new(),
 
                        keys_manager,
 
@@ -1607,7 +1659,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Gets the current configuration applied to all new channels,  as
+       /// Gets the current configuration applied to all new channels.
        pub fn get_current_default_configuration(&self) -> &UserConfig {
                &self.default_configuration
        }
@@ -1831,13 +1883,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -1862,7 +1913,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+                       self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
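Failing an HTLC backwards now names a receiver, so callers build an `HTLCDestination` instead of passing a lock guard. A sketch of the pairing used above, with stand-in types (the variant shape is taken from this diff; `0x4000 | 8` is BOLT 4's PERM|8, `permanent_channel_failure`):

```rust
type NodeId = [u8; 33]; // stand-in for secp256k1::PublicKey

// Variant shape as introduced for util::events::HTLCDestination in this diff.
enum HtlcDestination {
    NextHopChannel { node_id: Option<NodeId>, channel_id: [u8; 32] },
}

// For a channel being closed, the HTLC is failed back with PERM|8
// (permanent_channel_failure) attributed to the next-hop channel.
fn fail_back_on_close(counterparty: NodeId, channel_id: [u8; 32]) -> (u16, HtlcDestination) {
    (0x4000 | 8, HtlcDestination::NextHopChannel { node_id: Some(counterparty), channel_id })
}
```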
@@ -1918,7 +1970,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
                if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
@@ -2113,17 +2167,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                })
        }
 
-       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
+       fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
                macro_rules! return_malformed_err {
                        ($msg: expr, $err_code: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
                                                failure_code: $err_code,
-                                       })), self.channel_state.lock().unwrap());
+                                       }));
                                }
                        }
                }
@@ -2143,25 +2197,20 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        //node knows the HMAC matched, so they already know what is there...
                        return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
                }
-
-               let mut channel_state = None;
                macro_rules! return_err {
                        ($msg: expr, $err_code: expr, $data: expr) => {
                                {
                                        log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                       if channel_state.is_none() {
-                                               channel_state = Some(self.channel_state.lock().unwrap());
-                                       }
-                                       return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                       return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
                                                channel_id: msg.channel_id,
                                                htlc_id: msg.htlc_id,
                                                reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
-                                       })), channel_state.unwrap());
+                                       }));
                                }
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2215,14 +2264,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
 
-               channel_state = Some(self.channel_state.lock().unwrap());
                if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
                        // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
                        // with a short_channel_id of 0. This is important as various things later assume
                        // short_channel_id is non-0 in any ::Forward.
                        if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
-                               let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
                                if let Some((err, code, chan_update)) = loop {
+                                       let mut channel_state = self.channel_state.lock().unwrap();
+                                       let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned();
                                        let forwarding_id_opt = match id_option {
                                                None => { // unknown_next_peer
                                                        // Note that this is likely a timing oracle for detecting whether an scid is a
@@ -2236,7 +2285,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Some((_cp_id, chan_id)) => Some(chan_id.clone()),
                                        };
                                        let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
-                                               let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+                                               let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
                                                if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
                                                        // Note that the behavior here should be identical to the above block - we
                                                        // should NOT reveal the existence or non-existence of a private channel if
@@ -2322,7 +2371,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               (pending_forward_info, channel_state.unwrap())
+               pending_forward_info
        }
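Dropping the returned `MutexGuard` means `channel_state` is no longer held across the whole onion decode; the rewrite above takes the lock only inside the forwarding-check loop. The hygiene point in miniature (assumed stand-in state, not the real types):

```rust
use std::sync::Mutex;

struct ChannelState { num_channels: u64 }
struct Node { channel_state: Mutex<ChannelState> }

fn expensive_onion_decode() -> u64 { 1 } // stand-in for the real decode work

impl Node {
    // Before: the lock was taken up front and held across the whole decode.
    fn decode_holding_lock(&self) -> u64 {
        let guard = self.channel_state.lock().unwrap();
        let decoded = expensive_onion_decode(); // needs no lock at all
        guard.num_channels + decoded
    }

    // After: decode first, then take the lock only for the forwarding lookup.
    fn decode_scoped(&self) -> u64 {
        let decoded = expensive_onion_decode();
        let guard = self.channel_state.lock().unwrap();
        guard.num_channels + decoded
    }
}
```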
 
        /// Gets the current channel_update for the given channel. This first checks if the channel is
@@ -2369,7 +2418,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
                        cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
-                       htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
+                       htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
                        fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
                        fee_proportional_millionths: chan.get_fee_proportional_millionths(),
                        excess_data: Vec::new(),
@@ -2414,7 +2463,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                }
                        }
 
-                       let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+                       let id = match channel_lock.short_to_chan_info.get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                Some((_cp_id, chan_id)) => chan_id.clone(),
                        };
@@ -2455,19 +2504,30 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        channel_state, chan)
                                } {
                                        Some((update_add, commitment_signed, monitor_update)) => {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
-                                                       // Note that MonitorUpdateFailed here indicates (per function docs)
-                                                       // that we will resend the commitment update once monitor updating
-                                                       // is restored. Therefore, we must return an error indicating that
-                                                       // it is unsafe to retry the payment wholesale, which we do in the
-                                                       // send_payment check for MonitorUpdateFailed, below.
-                                                       insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
-                                                       return Err(APIError::MonitorUpdateFailed);
+                                               let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                                               let chan_id = chan.get().channel_id();
+                                               match (update_err,
+                                                       handle_monitor_update_res!(self, update_err, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false, true))
+                                               {
+                                                       (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+                                                       (ChannelMonitorUpdateStatus::Completed, Ok(())) => {
+                                                               insert_outbound_payment!();
+                                                       },
+                                                       (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+                                                               // Note that MonitorUpdateInProgress here indicates (per function
+                                                               // docs) that we will resend the commitment update once monitor
+                                                               // updating completes. Therefore, we must return an error
+                                                               // indicating that it is unsafe to retry the payment wholesale,
+                                                               // which we do in the send_payment check for
+                                                               // MonitorUpdateInProgress, below.
+                                                               insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
+                                                               return Err(APIError::MonitorUpdateInProgress);
+                                                       },
+                                                       _ => unreachable!(),
                                                }
-                                               insert_outbound_payment!();
 
-                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+                                               log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
                                                        node_id: path.first().unwrap().pubkey,
                                                        updates: msgs::CommitmentUpdate {
@@ -2513,12 +2573,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        /// PaymentSendFailure for more info.
        ///
        /// In general, a path may raise:
-       ///  * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+       ///  * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
        ///    node public key) is specified.
-       ///  * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+       ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
        ///    (including due to previous monitor update failure or new permanent monitor update
        ///    failure).
-       ///  * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+       ///  * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
        ///    relevant updates.
        ///
        /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
@@ -2582,8 +2642,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for (res, path) in results.iter().zip(route.paths.iter()) {
                        if res.is_ok() { has_ok = true; }
                        if res.is_err() { has_err = true; }
-                       if let &Err(APIError::MonitorUpdateFailed) = res {
-                               // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+                       if let &Err(APIError::MonitorUpdateInProgress) = res {
+                               // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
                                // PartialFailure.
                                has_err = true;
                                has_ok = true;
@@ -2733,6 +2793,43 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Send a payment that is probing the given route for liquidity. We calculate the
+       /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
+       /// us to easily discern them from real payments.
+       pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+               let payment_id = PaymentId(self.keys_manager.get_secure_random_bytes());
+
+               let payment_hash = self.probing_cookie_from_id(&payment_id);
+
+               if hops.len() < 2 {
+                       return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+                               err: "No need to probe a path with less than two hops".to_string()
+                       }))
+               }
+
+               let route = Route { paths: vec![hops], payment_params: None };
+
+               match self.send_payment_internal(&route, payment_hash, &None, None, Some(payment_id), None) {
+                       Ok(payment_id) => Ok((payment_hash, payment_id)),
+                       Err(e) => Err(e)
+               }
+       }
+
+       /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
+       /// payment probe.
+       pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
+               let target_payment_hash = self.probing_cookie_from_id(payment_id);
+               target_payment_hash == *payment_hash
+       }
+
+       /// Returns the 'probing cookie' for the given [`PaymentId`].
+       fn probing_cookie_from_id(&self, payment_id: &PaymentId) -> PaymentHash {
+               let mut preimage = [0u8; 64];
+               preimage[..32].copy_from_slice(&self.probing_cookie_secret);
+               preimage[32..].copy_from_slice(&payment_id.0);
+               PaymentHash(Sha256::hash(&preimage).into_inner())
+       }
+
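
[Editorial aside] Taken together, the three functions above implement stateless probe recognition: a probe's payment hash is a digest of a node-local secret and the random payment id, so no per-probe state needs to be stored. A standalone sketch of the construction, assuming only the `bitcoin::hashes` API this file already uses (the free-standing names are mine):

    use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

    // SHA256(probing_cookie_secret || payment_id), mirroring
    // `probing_cookie_from_id` above.
    fn probing_cookie(secret: &[u8; 32], payment_id: &[u8; 32]) -> [u8; 32] {
        let mut preimage = [0u8; 64];
        preimage[..32].copy_from_slice(secret);
        preimage[32..].copy_from_slice(payment_id);
        Sha256::hash(&preimage).into_inner()
    }

    // A probe is recognized by recomputing the cookie and comparing it with
    // the HTLC's payment hash, exactly as `payment_is_probe` does.
    fn is_probe(secret: &[u8; 32], payment_id: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
        probing_cookie(secret, payment_id) == *payment_hash
    }

Because the hash can always be recomputed from `probing_cookie_secret`, probes presumably remain recognizable across restarts without any dedicated bookkeeping.
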
        /// Handles the generation of a funding transaction, optionally (for tests) with a function
        /// which checks the correctness of the funding transaction given the associated channel.
        fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>(
@@ -2771,6 +2868,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                panic!("Generated duplicate funding txid?");
                        },
                        hash_map::Entry::Vacant(e) => {
+                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                               if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+                                       panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+                               }
                                e.insert(chan);
                        }
                }
@@ -2829,8 +2930,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        // Transactions are evaluated as final by network mempools at the next block. However, the modules
                        // constituting our Lightning node might not have a perfectly synchronized view of the chain. Thus, if
                        // the wallet module is ahead of the LDK view, allow one more block of headroom.
-                       // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped.
-                       if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 {
+                       if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
                                return Err(APIError::APIMisuseError {
                                        err: "Funding transaction absolute timelock is non-final".to_owned()
                                });
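
[Editorial aside] The rewritten check above swaps raw integer comparisons for rust-bitcoin's typed `Sequence` and `LockTime`. The same predicate in isolation, mirroring the diff line for line (and assuming the rust-bitcoin version it imports, where `Transaction::lock_time` converts via `LockTime::from` and exposes its raw `u32` as `.0`); the helper name is mine:

    use bitcoin::{LockTime, Sequence, Transaction};

    // True when `tx` might still be non-final at the next block: at least one
    // input opts into lock-time enforcement (sequence below MAX) and the
    // lock-time is a block height more than one block beyond `height + 1`.
    fn may_be_non_final(tx: &Transaction, height: u32) -> bool {
        !tx.input.iter().all(|input| input.sequence == Sequence::MAX)
            && LockTime::from(tx.lock_time).is_block_height()
            && tx.lock_time.0 > height + 2
    }
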
@@ -2863,89 +2963,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                })
        }
 
-       #[allow(dead_code)]
-       // Messages of up to 64KB should never end up more than half full with addresses, as that would
-       // be absurd. We ensure this by checking that at least 100 (our stated public contract on when
-       // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
-       // message...
-       const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
-       #[deny(const_err)]
-       #[allow(dead_code)]
-       // ...by failing to compile if the number of addresses that would be half of a message is
-       // smaller than 100:
-       const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100;
-
-       /// Regenerates channel_announcements and generates a signed node_announcement from the given
-       /// arguments, providing them in corresponding events via
-       /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
-       /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
-       /// announcement to ensure that the lightning P2P network is aware of the channels we have and
-       /// our network addresses.
-       ///
-       /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
-       /// node to humans. They carry no in-protocol meaning.
-       ///
-       /// `addresses` represent the set (possibly empty) of socket addresses on which this node
-       /// accepts incoming connections. These will be included in the node_announcement, publicly
-       /// tying these addresses together and to this node. If you wish to preserve user privacy,
-       /// addresses should likely contain only Tor Onion addresses.
-       ///
-       /// Panics if `addresses` is absurdly large (more than 100).
-       ///
-       /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
-       pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
-               if addresses.len() > 100 {
-                       panic!("More than half the message size was taken up by public addresses!");
-               }
-
-               // While all existing nodes handle unsorted addresses just fine, the spec requires that
-               // addresses be sorted for future compatibility.
-               addresses.sort_by_key(|addr| addr.get_id());
-
-               let announcement = msgs::UnsignedNodeAnnouncement {
-                       features: NodeFeatures::known(),
-                       timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
-                       node_id: self.get_our_node_id(),
-                       rgb, alias, addresses,
-                       excess_address_data: Vec::new(),
-                       excess_data: Vec::new(),
-               };
-               let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
-               let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
-
-               let mut channel_state_lock = self.channel_state.lock().unwrap();
-               let channel_state = &mut *channel_state_lock;
-
-               let mut announced_chans = false;
-               for (_, chan) in channel_state.by_id.iter() {
-                       if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
-                               channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
-                                       msg,
-                                       update_msg: match self.get_channel_update_for_broadcast(chan) {
-                                               Ok(msg) => msg,
-                                               Err(_) => continue,
-                                       },
-                               });
-                               announced_chans = true;
-                       } else {
-                               // If the channel is not public or has not yet reached channel_ready, check the
-                               // next channel. If we don't yet have any public channels, we'll skip the broadcast
-                               // below as peers may not accept it without channels on chain first.
-                       }
-               }
-
-               if announced_chans {
-                       channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
-                               msg: msgs::NodeAnnouncement {
-                                       signature: node_announce_sig,
-                                       contents: announcement
-                               },
-                       });
-               }
-       }
-
        /// Atomically updates the [`ChannelConfig`] for the given channels.
        ///
        /// Once the updates are applied, each eligible channel (advertised with a known short channel
@@ -3025,12 +3042,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
                let mut handle_errors = Vec::new();
                {
-                       let mut channel_state_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_state_lock;
+                       let mut forward_htlcs = HashMap::new();
+                       mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
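
[Editorial aside] `mem::swap` against an empty map lets the code take ownership of all pending forwards while holding the `forward_htlcs` lock only for the swap itself, so the per-iteration `channel_state` lock acquired below never nests inside it. The take-out-of-a-mutex pattern in isolation, with std types standing in for LDK's `sync` wrappers:

    use std::collections::HashMap;
    use std::mem;
    use std::sync::Mutex;

    fn take_all(pending: &Mutex<HashMap<u64, Vec<String>>>) -> HashMap<u64, Vec<String>> {
        let mut taken = HashMap::new();
        // Lock only long enough to swap; the caller then iterates lock-free.
        mem::swap(&mut taken, &mut pending.lock().unwrap());
        taken
    }
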
 
-                       for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
+                       for (short_chan_id, mut pending_forwards) in forward_htlcs {
+                               let mut channel_state_lock = self.channel_state.lock().unwrap();
+                               let channel_state = &mut *channel_state_lock;
                                if short_chan_id != 0 {
-                                       let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
+                                       let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) {
                                                Some((_cp_id, chan_id)) => chan_id.clone(),
                                                None => {
                                                        for forward_info in pending_forwards.drain(..) {
@@ -3038,21 +3057,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
                                                                                routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
                                                                                prev_funding_outpoint } => {
+                                                                                       macro_rules! failure_handler {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+                                                                                                       log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+                                                                                                       let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                                                                                                               short_channel_id: prev_short_channel_id,
+                                                                                                               outpoint: prev_funding_outpoint,
+                                                                                                               htlc_id: prev_htlc_id,
+                                                                                                               incoming_packet_shared_secret: incoming_shared_secret,
+                                                                                                               phantom_shared_secret: $phantom_ss,
+                                                                                                       });
+
+                                                                                                       let reason = if $next_hop_unknown {
+                                                                                                               HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+                                                                                                       } else {
+                                                                                                       HTLCDestination::FailedPayment { payment_hash }
+                                                                                                       };
+
+                                                                                                       failed_forwards.push((htlc_source, payment_hash,
+                                                                                                               HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+                                                                                                               reason
+                                                                                                       ));
+                                                                                                       continue;
+                                                                                               }
+                                                                                       }
                                                                                        macro_rules! fail_forward {
                                                                                                ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
                                                                                                        {
-                                                                                                               log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
-                                                                                                               let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                                                                                                                       short_channel_id: prev_short_channel_id,
-                                                                                                                       outpoint: prev_funding_outpoint,
-                                                                                                                       htlc_id: prev_htlc_id,
-                                                                                                                       incoming_packet_shared_secret: incoming_shared_secret,
-                                                                                                                       phantom_shared_secret: $phantom_ss,
-                                                                                                               });
-                                                                                                               failed_forwards.push((htlc_source, payment_hash,
-                                                                                                                       HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
-                                                                                                               ));
-                                                                                                               continue;
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+                                                                                                       }
+                                                                                               }
+                                                                                       }
+                                                                                       macro_rules! failed_payment {
+                                                                                               ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+                                                                                                       {
+                                                                                                               failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
                                                                                                        }
                                                                                                }
                                                                                        }
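
[Editorial aside] The macro layering introduced here, one `failure_handler!` parameterized by a `$next_hop_unknown` flag plus two thin wrappers, keeps the many call sites readable while still recording the right `HTLCDestination`. The same pattern in miniature (a self-contained toy, not LDK code):

    macro_rules! failure_handler {
        ($msg: expr, $next_hop_unknown: expr) => {
            // A real handler would build an HTLCSource and push onto
            // `failed_forwards`; here we just record the distinction.
            println!("failed: {} (unknown next hop: {})", $msg, $next_hop_unknown);
        }
    }
    macro_rules! fail_forward {
        ($msg: expr) => { failure_handler!($msg, true) }
    }
    macro_rules! failed_payment {
        ($msg: expr) => { failure_handler!($msg, false) }
    }

    fn main() {
        fail_forward!("no channel with the requested SCID");
        failed_payment!("onion HMAC did not verify");
    }
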
@@ -3060,7 +3100,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
                                                                                                if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
                                                                                                        let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
-                                                                                                       let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                                       let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
                                                                                                                Ok(res) => res,
                                                                                                                Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                        let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -3068,17 +3108,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                                        // `update_fail_malformed_htlc`, meaning here we encrypt the error as
                                                                                                                        // if it came from us (the second-to-last hop) but contains the sha256
                                                                                                                        // of the onion.
-                                                                                                                       fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+                                                                                                                       failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
                                                                                                                },
                                                                                                                Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
-                                                                                                                       fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+                                                                                                                       failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
                                                                                                                },
                                                                                                        };
                                                                                                        match next_hop {
                                                                                                                onion_utils::Hop::Receive(hop_data) => {
                                                                                                                        match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
                                                                                                                                Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
-                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+                                                                                                                               Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
                                                                                                                        }
                                                                                                                },
                                                                                                                _ => panic!(),
@@ -3129,7 +3169,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                        }
                                                                                        let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); // UPDATE|7: temporary_channel_failure
                                                                                        failed_forwards.push((htlc_source, payment_hash,
-                                                                                               HTLCFailReason::Reason { failure_code, data }
+                                                                                               HTLCFailReason::Reason { failure_code, data },
+                                                                                               HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
                                                                                        ));
                                                                                        continue;
                                                                                },
@@ -3204,9 +3245,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        continue;
                                                                }
                                                        };
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                               handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
-                                                               continue;
+                                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {},
+                                                               e => {
+                                                                       handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+                                                                       continue;
+                                                               }
                                                        }
                                                        log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
                                                                add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
@@ -3258,7 +3302,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                };
 
                                                                macro_rules! fail_htlc {
-                                                                       ($htlc: expr) => {
+                                                                       ($htlc: expr, $payment_hash: expr) => {
                                                                                let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
                                                                                htlc_msat_height_data.extend_from_slice(
                                                                                        &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
@@ -3270,7 +3314,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
                                                                                                phantom_shared_secret,
                                                                                        }), payment_hash,
-                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+                                                                                       HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, // PERM|15: incorrect_or_unknown_payment_details
+                                                                                       HTLCDestination::FailedPayment { payment_hash: $payment_hash },
                                                                                ));
                                                                        }
                                                                }
@@ -3289,7 +3334,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                if htlcs.len() == 1 {
                                                                                        if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
                                                                                                log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc);
+                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                continue
                                                                                        }
                                                                                }
@@ -3311,7 +3356,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
                                                                                                log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if total_value == $payment_data.total_msat {
                                                                                        htlcs.push(claimable_htlc);
                                                                                        new_events.push(events::Event::PaymentReceived {
@@ -3345,7 +3390,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
                                                                                                        Ok(payment_preimage) => payment_preimage,
                                                                                                        Err(()) => {
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                                continue
                                                                                                        }
                                                                                                };
@@ -3364,7 +3409,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                                        },
                                                                                                        hash_map::Entry::Occupied(_) => {
                                                                                                                log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc);
+                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
                                                                                        }
@@ -3373,17 +3418,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
                                                                                if payment_data.is_none() {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                        continue
                                                                                };
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
-                                                                                       fail_htlc!(claimable_htlc);
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
                                                                                        let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
                                                                                        if payment_received_generated {
@@ -3402,8 +3447,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                }
 
-               for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+               for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+                       self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
                }
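
[Editorial aside] Each entry drained above now carries an `HTLCDestination`, which ultimately lets a consumer tell forwarding failures apart from failed receives. A hypothetical consumer; the variant names are taken from this diff, but the function and the event plumbing around it are assumptions:

    use lightning::util::events::HTLCDestination;

    #[allow(unreachable_patterns)]
    fn note_failed_htlc(destination: &HTLCDestination) {
        match destination {
            HTLCDestination::NextHopChannel { node_id, channel_id } =>
                println!("could not forward to {:?} over channel {:?}", node_id, channel_id),
            HTLCDestination::UnknownNextHop { requested_forward_scid } =>
                println!("no channel with SCID {}", requested_forward_scid),
            HTLCDestination::FailedPayment { payment_hash } =>
                println!("rejected inbound payment {:?}", payment_hash),
            // Defensive arm in case further variants exist or are added.
            _ => {},
        }
    }
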
                self.forward_htlcs(&mut phantom_receives);
 
@@ -3447,7 +3492,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                self.process_background_events();
        }
 
-       fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, (PublicKey, [u8; 32])>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+       fn update_channel_fee(&self, short_to_chan_info: &mut HashMap<u64, (PublicKey, [u8; 32])>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
                if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
@@ -3467,30 +3512,33 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
                        Ok(res) => Ok(res),
                        Err(e) => {
-                               let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                               let (drop, res) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
                                if drop { retain_channel = false; }
                                Err(res)
                        }
                };
                let ret_err = match res {
                        Ok(Some((update_fee, commitment_signed, monitor_update))) => {
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                       let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
-                                       if drop { retain_channel = false; }
-                                       res
-                               } else {
-                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                               node_id: chan.get_counterparty_node_id(),
-                                               updates: msgs::CommitmentUpdate {
-                                                       update_add_htlcs: Vec::new(),
-                                                       update_fulfill_htlcs: Vec::new(),
-                                                       update_fail_htlcs: Vec::new(),
-                                                       update_fail_malformed_htlcs: Vec::new(),
-                                                       update_fee: Some(update_fee),
-                                                       commitment_signed,
-                                               },
-                                       });
-                                       Ok(())
+                               match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                       ChannelMonitorUpdateStatus::Completed => {
+                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                       node_id: chan.get_counterparty_node_id(),
+                                                       updates: msgs::CommitmentUpdate {
+                                                               update_add_htlcs: Vec::new(),
+                                                               update_fulfill_htlcs: Vec::new(),
+                                                               update_fail_htlcs: Vec::new(),
+                                                               update_fail_malformed_htlcs: Vec::new(),
+                                                               update_fee: Some(update_fee),
+                                                               commitment_signed,
+                                                       },
+                                               });
+                                               Ok(())
+                                       },
+                                       e => {
+                                               let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+                                               if drop { retain_channel = false; }
+                                               res
+                                       }
                                }
                        },
                        Ok(None) => Ok(()),
@@ -3508,16 +3556,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut should_persist = NotifyOption::SkipPersist;
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        {
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_id = &mut channel_state.short_to_id;
+                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push(err);
@@ -3547,7 +3595,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut should_persist = NotifyOption::SkipPersist;
                        if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
 
-                       let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+                       let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
                        let mut handle_errors = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
@@ -3555,10 +3603,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                let mut channel_state_lock = self.channel_state.lock().unwrap();
                                let channel_state = &mut *channel_state_lock;
                                let pending_msg_events = &mut channel_state.pending_msg_events;
-                               let short_to_id = &mut channel_state.short_to_id;
+                               let short_to_chan_info = &mut channel_state.short_to_chan_info;
                                channel_state.by_id.retain(|chan_id, chan| {
                                        let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_chan_info, pending_msg_events, chan_id, chan, new_feerate);
                                        if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
                                        if err.is_err() {
                                                handle_errors.push((err, counterparty_node_id));
@@ -3566,7 +3614,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if !retain_channel { return false; }
 
                                        if let Err(e) = chan.timer_check_closing_negotiation_progress() {
-                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_chan_info, chan, chan_id);
                                                handle_errors.push((Err(err), chan.get_counterparty_node_id()));
                                                if needs_close { return false; }
                                        }
@@ -3626,7 +3674,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
 
                        for htlc_source in timed_out_mpp_htlcs.drain(..) {
-                               self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+                               let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+                               self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver); // failure_code 23 is mpp_timeout (BOLT 4)
                        }
 
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
@@ -3652,17 +3701,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let mut channel_state = Some(self.channel_state.lock().unwrap());
-               let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+               let removed_source = {
+                       let mut channel_state = self.channel_state.lock().unwrap();
+                       channel_state.claimable_htlcs.remove(payment_hash)
+               };
                if let Some((_, mut sources)) = removed_source {
                        for htlc in sources.drain(..) {
-                               if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
                                                self.best_block.read().unwrap().height()));
-                               self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
+                               self.fail_htlc_backwards_internal(
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
-                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+                                               HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+                                               HTLCDestination::FailedPayment { payment_hash: *payment_hash });
                        }
                }
        }
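
[Editorial aside] The reshuffle in `fail_htlc_backwards` is the scoped-lock pattern: hold the `channel_state` lock just long enough to remove the claimable entry, drop the guard, and only then walk the sources, so `fail_htlc_backwards_internal` is always entered lock-free (the debug assertion further down enforces this). The pattern in miniature, with std types standing in for LDK's `sync` wrappers:

    use std::collections::HashMap;
    use std::sync::Mutex;

    fn drain_entry(map: &Mutex<HashMap<u32, Vec<u64>>>, key: u32) -> Vec<u64> {
        // The guard lives only inside this block; by the time we return, no
        // lock is held and the caller may freely take other locks.
        let removed = {
            let mut guard = map.lock().unwrap();
            guard.remove(&key)
        };
        removed.unwrap_or_default()
    }
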
@@ -3697,7 +3748,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
                        let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
                        if desired_err_code == 0x1000 | 20 {
-                               // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+                               // No flags for `disabled_flags` are currently defined, so the field is always two zero bytes.
+                               // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
                                0u16.write(&mut enc).expect("Writes cannot fail");
                        }
                        (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
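
[Editorial aside] For reference, the failure `data` assembled here follows the BOLT 4 layout: for `channel_disabled` (0x1000|20) a two-byte flags field precedes the length-prefixed, type-prefixed `channel_update`, and the `+ 2` covers the two-byte message type written ahead of the update itself. An illustrative reconstruction (the helper and its parameters are mine, not LDK API):

    // Byte layout sketch for the `channel_disabled` failure data.
    fn channel_disabled_data(update_type: u16, serialized_update: &[u8]) -> Vec<u8> {
        let mut data = Vec::new();
        data.extend_from_slice(&0u16.to_be_bytes()); // disabled_flags: none defined
        data.extend_from_slice(&(serialized_update.len() as u16 + 2).to_be_bytes());
        data.extend_from_slice(&update_type.to_be_bytes()); // 258 for channel_update
        data.extend_from_slice(serialized_update);
        data
    }
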
@@ -3718,74 +3770,34 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        // be surfaced to the user.
        fn fail_holding_cell_htlcs(
                &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
-               _counterparty_node_id: &PublicKey
+               counterparty_node_id: &PublicKey
        ) {
                for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
-                       match htlc_src {
-                               HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
-                                       let (failure_code, onion_failure_data) =
-                                               match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
-                                                       hash_map::Entry::Occupied(chan_entry) => {
-                                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
-                                                       },
-                                                       hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
-                                               };
-                                       let channel_state = self.channel_state.lock().unwrap();
-                                       self.fail_htlc_backwards_internal(channel_state,
-                                               htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
-                               },
-                               HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
-                                       let mut session_priv_bytes = [0; 32];
-                                       session_priv_bytes.copy_from_slice(&session_priv[..]);
-                                       let mut outbounds = self.pending_outbound_payments.lock().unwrap();
-                                       if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
-                                               if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() {
-                                                       let retry = if let Some(payment_params_data) = payment_params {
-                                                               let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
-                                                               Some(RouteParameters {
-                                                                       payment_params: payment_params_data,
-                                                                       final_value_msat: path_last_hop.fee_msat,
-                                                                       final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
-                                                               })
-                                                       } else { None };
-                                                       let mut pending_events = self.pending_events.lock().unwrap();
-                                                       pending_events.push(events::Event::PaymentPathFailed {
-                                                               payment_id: Some(payment_id),
-                                                               payment_hash,
-                                                               rejected_by_dest: false,
-                                                               network_update: None,
-                                                               all_paths_failed: payment.get().remaining_parts() == 0,
-                                                               path: path.clone(),
-                                                               short_channel_id: None,
-                                                               retry,
-                                                               #[cfg(test)]
-                                                               error_code: None,
-                                                               #[cfg(test)]
-                                                               error_data: None,
-                                                       });
-                                                       if payment.get().abandoned() && payment.get().remaining_parts() == 0 {
-                                                               pending_events.push(events::Event::PaymentFailed {
-                                                                       payment_id,
-                                                                       payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
-                                                               });
-                                                               payment.remove();
-                                                       }
-                                               }
-                                       } else {
-                                               log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
-                                       }
-                               },
-                       };
+                       let (failure_code, onion_failure_data) =
+                               match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+                                       hash_map::Entry::Occupied(chan_entry) => {
+                                               self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+                                       },
+                                       hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+                               };
+
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+                       self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
                }
        }
 
        /// Fails an HTLC backwards to the sender of it to us.
-       /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
-       /// There are several callsites that do stupid things like loop over a list of payment_hashes
-       /// to fail and take the channel_state lock for each iteration (as we take ownership and may
-       /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
-       /// still-available channels.
-       fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+       /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
+       fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+               #[cfg(debug_assertions)]
+               {
+                       // Ensure that the `channel_state` lock is not held when calling this function.
+                       // This ensures that future code doesn't introduce a lock-order requirement for
+                       // `forward_htlcs` to be locked after the `channel_state` lock, which calling
+                       // this function with `channel_state` locked would create.
+                       assert!(self.channel_state.try_lock().is_ok());
+               }
+
                //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
                //identify whether we sent it or not based on the (I presume) very different runtime
                //between the branches here. We should make this async and move it into the forward HTLCs
@@ -3824,8 +3836,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
                                        return;
                                }
-                               mem::drop(channel_state_lock);
-                               let retry = if let Some(payment_params_data) = payment_params {
+                               let mut retry = if let Some(payment_params_data) = payment_params {
                                        let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
                                        Some(RouteParameters {
                                                payment_params: payment_params_data.clone(),
@@ -3841,22 +3852,43 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 #[cfg(not(test))]
                                                let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
-                                               // TODO: If we decided to blame ourselves (or one of our channels) in
-                                               // process_onion_failure we should close that channel as it implies our
-                                               // next-hop is needlessly blaming us!
-                                               events::Event::PaymentPathFailed {
-                                                       payment_id: Some(payment_id),
-                                                       payment_hash: payment_hash.clone(),
-                                                       rejected_by_dest: !payment_retryable,
-                                                       network_update,
-                                                       all_paths_failed,
-                                                       path: path.clone(),
-                                                       short_channel_id,
-                                                       retry,
-#[cfg(test)]
-                                                       error_code: onion_error_code,
-#[cfg(test)]
-                                                       error_data: onion_error_data
+
+                                               if self.payment_is_probe(payment_hash, &payment_id) {
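+                                                       // Probes are expected to fail at the destination; a
+                                                       // non-retryable failure means the probe traversed the
+                                                       // full path, which we report as success.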
+                                                       if !payment_retryable {
+                                                               events::Event::ProbeSuccessful {
+                                                                       payment_id,
+                                                                       payment_hash: payment_hash.clone(),
+                                                                       path: path.clone(),
+                                                               }
+                                                       } else {
+                                                               events::Event::ProbeFailed {
+                                                                       payment_id,
+                                                                       payment_hash: payment_hash.clone(),
+                                                                       path: path.clone(),
+                                                                       short_channel_id,
+                                                               }
+                                                       }
+                                               } else {
+                                                       // TODO: If we decided to blame ourselves (or one of our channels) in
+                                                       // process_onion_failure we should close that channel as it implies our
+                                                       // next-hop is needlessly blaming us!
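+                                                       // Record the failing channel in the retry parameters so
+                                                       // any retry routes around it.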
+                                                       if let Some(scid) = short_channel_id {
+                                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+                                                       }
+                                                       events::Event::PaymentPathFailed {
+                                                               payment_id: Some(payment_id),
+                                                               payment_hash: payment_hash.clone(),
+                                                               payment_failed_permanently: !payment_retryable,
+                                                               network_update,
+                                                               all_paths_failed,
+                                                               path: path.clone(),
+                                                               short_channel_id,
+                                                               retry,
+                                                               #[cfg(test)]
+                                                               error_code: onion_error_code,
+                                                               #[cfg(test)]
+                                                               error_data: onion_error_data
+                                                       }
                                                }
                                        },
                                        &HTLCFailReason::Reason {
@@ -3872,19 +3904,31 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                // ChannelDetails.
                                                // TODO: For non-temporary failures, we really should be closing the
                                                // channel here as we apparently can't relay through them anyway.
-                                               events::Event::PaymentPathFailed {
-                                                       payment_id: Some(payment_id),
-                                                       payment_hash: payment_hash.clone(),
-                                                       rejected_by_dest: path.len() == 1,
-                                                       network_update: None,
-                                                       all_paths_failed,
-                                                       path: path.clone(),
-                                                       short_channel_id: Some(path.first().unwrap().short_channel_id),
-                                                       retry,
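+                                               // Here the failure is attributed to the first hop of the path
+                                               // (our own channel), so note that channel as previously-failed
+                                               // for any retry.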
+                                               let scid = path.first().unwrap().short_channel_id;
+                                               retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+
+                                               if self.payment_is_probe(payment_hash, &payment_id) {
+                                                       events::Event::ProbeFailed {
+                                                               payment_id,
+                                                               payment_hash: payment_hash.clone(),
+                                                               path: path.clone(),
+                                                               short_channel_id: Some(scid),
+                                                       }
+                                               } else {
+                                                       events::Event::PaymentPathFailed {
+                                                               payment_id: Some(payment_id),
+                                                               payment_hash: payment_hash.clone(),
+                                                               payment_failed_permanently: false,
+                                                               network_update: None,
+                                                               all_paths_failed,
+                                                               path: path.clone(),
+                                                               short_channel_id: Some(scid),
+                                                               retry,
 #[cfg(test)]
-                                                       error_code: Some(*failure_code),
+                                                               error_code: Some(*failure_code),
 #[cfg(test)]
-                                                       error_data: Some(data.clone()),
+                                                               error_data: Some(data.clone()),
+                                                       }
                                                }
                                        }
                                };
@@ -3892,7 +3936,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                pending_events.push(path_failure);
                                if let Some(ev) = full_failure_ev { pending_events.push(ev); }
                        },
-                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+                       HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
                                let err_packet = match onion_error {
                                        HTLCFailReason::Reason { failure_code, data } => {
                                                log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
@@ -3912,10 +3956,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                };
 
                                let mut forward_event = None;
-                               if channel_state_lock.forward_htlcs.is_empty() {
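+                               // If `forward_htlcs` is empty there is no `PendingHTLCsForwardable`
+                               // event outstanding, so we schedule one below to get this failure
+                               // relayed backwards.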
+                               let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+                               if forward_htlcs.is_empty() {
                                        forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
                                }
-                               match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+                               match forward_htlcs.entry(short_channel_id) {
                                        hash_map::Entry::Occupied(mut entry) => {
                                                entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
                                        },
@@ -3923,13 +3968,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
                                        }
                                }
-                               mem::drop(channel_state_lock);
+                               mem::drop(forward_htlcs);
+                               let mut pending_events = self.pending_events.lock().unwrap();
                                if let Some(time) = forward_event {
-                                       let mut pending_events = self.pending_events.lock().unwrap();
                                        pending_events.push(events::Event::PendingHTLCsForwardable {
                                                time_forwardable: time
                                        });
                                }
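+                               // Surface the failure to the user, identifying the channel the HTLC
+                               // arrived over and where its handling failed.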
+                               pending_events.push(events::Event::HTLCHandlingFailed {
+                                       prev_channel_id: outpoint.to_channel_id(),
+                                       failed_next_destination: destination
+                               });
                        },
                }
        }
@@ -3957,8 +4006,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-               let mut channel_state = Some(self.channel_state.lock().unwrap());
-               let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+               let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
                if let Some((payment_purpose, mut sources)) = removed_source {
                        assert!(!sources.is_empty());
 
@@ -3976,8 +4024,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut claimable_amt_msat = 0;
                        let mut expected_amt_msat = None;
                        let mut valid_mpp = true;
+                       let mut errs = Vec::new();
+                       let mut claimed_any_htlcs = false;
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
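+                       // For an MPP claim to be valid we must still know the channel each part
+                       // arrived over; otherwise we fail all parts back below.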
                        for htlc in sources.iter() {
-                               if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
+                               if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) {
                                        valid_mpp = false;
                                        break;
                                }
@@ -4010,20 +4062,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        expected_amt_msat.unwrap(), claimable_amt_msat);
                                return;
                        }
-
-                       let mut errs = Vec::new();
-                       let mut claimed_any_htlcs = false;
-                       for htlc in sources.drain(..) {
-                               if !valid_mpp {
-                                       if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
-                                       let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-                                       htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
-                                                       self.best_block.read().unwrap().height()));
-                                       self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
-                                                                        HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
-                                                                        HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
-                               } else {
-                                       match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
+                       if valid_mpp {
+                               for htlc in sources.drain(..) {
+                                       match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
                                                ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
                                                        if let msgs::ErrorAction::IgnoreError = err.err.action {
                                                                // We got a temporary failure updating monitor, but will claim the
@@ -4043,6 +4084,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                }
                        }
+                       mem::drop(channel_state_lock);
+                       if !valid_mpp {
+                               for htlc in sources.drain(..) {
+                                       let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+                                       htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
+                                               self.best_block.read().unwrap().height()));
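+                                       // 0x4000|15 is BOLT 4's `incorrect_or_unknown_payment_details`;
+                                       // its data carries the HTLC amount and our current block height.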
+                                       self.fail_htlc_backwards_internal(
+                                               HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
+                                               HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+                                               HTLCDestination::FailedPayment { payment_hash });
+                               }
+                       }
 
                        if claimed_any_htlcs {
                                self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
@@ -4052,10 +4105,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                });
                        }
 
-                       // Now that we've done the entire above loop in one lock, we can handle any errors
-                       // which were generated.
-                       channel_state.take();
-
+                       // Now we can handle any errors which were generated.
                        for (counterparty_node_id, err) in errs.drain(..) {
                                let res: Result<(), _> = Err(err);
                                let _ = handle_error!(self, res, counterparty_node_id);
@@ -4066,7 +4116,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
                //TODO: Delay the claimed_funds relaying just like we do outbound relay!
                let channel_state = &mut **channel_state_lock;
-               let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
+               let chan_id = match channel_state.short_to_chan_info.get(&prev_hop.short_channel_id) {
                        Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                return ClaimFundsFromHop::PrevHopForceClosed
@@ -4077,15 +4127,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                                Ok(msgs_monitor_option) => {
                                        if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
-                                                               "Failed to update channel monitor with preimage {:?}: {:?}",
-                                                               payment_preimage, e);
-                                                       return ClaimFundsFromHop::MonitorUpdateFail(
-                                                               chan.get().get_counterparty_node_id(),
-                                                               handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-                                                               Some(htlc_value_msat)
-                                                       );
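+                                               // Anything other than `Completed` (i.e. `InProgress` or
+                                               // `PermanentFailure`) is handled as a monitor-update failure
+                                               // below.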
+                                               match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                                       ChannelMonitorUpdateStatus::Completed => {},
+                                                       e => {
+                                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+                                                                       "Failed to update channel monitor with preimage {:?}: {:?}",
+                                                                       payment_preimage, e);
+                                                               return ClaimFundsFromHop::MonitorUpdateFail(
+                                                                       chan.get().get_counterparty_node_id(),
+                                                                       handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                                                                       Some(htlc_value_msat)
+                                                               );
+                                                       }
                                                }
                                                if let Some((msg, commitment_signed)) = msgs {
                                                        log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -4108,13 +4161,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        }
                                },
                                Err((e, monitor_update)) => {
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                               log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
-                                                       "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
-                                                       payment_preimage, e);
+                                       match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+                                                               "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                                                               payment_preimage, e);
+                                               },
                                        }
                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
-                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                                       let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id);
                                        if drop {
                                                chan.remove_entry();
                                        }
@@ -4218,9 +4274,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // We update the ChannelMonitor on the backward link, after
                                        // receiving an offchain preimage event from the forward link (the
                                        // event being update_fulfill_htlc).
-                                       if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+                                       let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               // TODO: This needs to be handled somehow - if we receive a monitor update
+                                               // with a preimage we *must* somehow manage to propagate it to the upstream
+                                               // channel, or we must have an ability to receive the same event and try
+                                               // again on restart.
                                                log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                                                                                        payment_preimage, e);
+                                                       payment_preimage, update_res);
                                        }
                                        // Note that we do *not* set `claimed_htlc` to false here. In fact, this
                                        // totally could be a duplicate claim, but we have no way of knowing
@@ -4265,7 +4326,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
                let chan_restoration_res;
-               let (mut pending_failures, finalized_claims) = {
+               let (mut pending_failures, finalized_claims, counterparty_node_id) = {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
@@ -4276,6 +4337,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return;
                        }
 
+                       let counterparty_node_id = channel.get().get_counterparty_node_id();
                        let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
                        let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
                                // We only send a channel_update in the case where we are just now sending a
@@ -4294,12 +4356,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        if let Some(upd) = channel_update {
                                channel_state.pending_msg_events.push(upd);
                        }
-                       (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+                       (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
                };
                post_handle_chan_restoration!(self, chan_restoration_res);
                self.finalize_claims(finalized_claims);
                for failure in pending_failures.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+                       self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
                }
        }
 
@@ -4482,29 +4546,28 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
                // lock before watch_channel
-               if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
-                       match e {
-                               ChannelMonitorUpdateErr::PermanentFailure => {
-                                       // Note that we reply with the new channel_id in error messages if we gave up on the
-                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                                       // any messages referencing a previously-closed channel anyway.
-                                       // We do not do a force-close here as that would generate a monitor update for
-                                       // a monitor that we didn't manage to store (and that we don't care about - we
-                                       // don't respond with the funding_signed so the channel can never go on chain).
-                                       let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
-                                       assert!(failed_htlcs.is_empty());
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
-                               },
-                               ChannelMonitorUpdateErr::TemporaryFailure => {
-                                       // There's no problem signing a counterparty's funding transaction if our monitor
-                                       // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                                       // until we have persisted our monitor.
-                                       chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
-                                       channel_ready = None; // Don't send the channel_ready now
-                               },
-                       }
+               match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
+                       ChannelMonitorUpdateStatus::Completed => {},
+                       ChannelMonitorUpdateStatus::PermanentFailure => {
+                               // Note that we reply with the new channel_id in error messages if we gave up on the
+                               // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                               // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                               // any messages referencing a previously-closed channel anyway.
+                               // We do not propagate the monitor update to the user as it would be for a monitor
+                               // that we didn't manage to store (and that we don't care about - we don't respond
+                               // with the funding_signed so the channel can never go on chain).
+                               let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
+                               assert!(failed_htlcs.is_empty());
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
+                       },
+                       ChannelMonitorUpdateStatus::InProgress => {
+                               // There's no problem signing a counterparty's funding transaction if our monitor
+                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                               // until we have persisted our monitor.
+                               chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+                               channel_ready = None; // Don't send the channel_ready now
+                       },
                }
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -4513,12 +4576,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
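+                               // Record the counterparty for this channel in `id_to_peer` so the
+                               // channel can later be looked up by `channel_id` alone.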
+                               let mut id_to_peer = self.id_to_peer.lock().unwrap();
+                               match id_to_peer.entry(chan.channel_id()) {
+                                       hash_map::Entry::Occupied(_) => {
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                                                       "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
+                                                       funding_msg.channel_id))
+                                       },
+                                       hash_map::Entry::Vacant(i_e) => {
+                                               i_e.insert(chan.get_counterparty_node_id());
+                                       }
+                               }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
                                        node_id: counterparty_node_id.clone(),
                                        msg: funding_msg,
                                });
                                if let Some(msg) = channel_ready {
-                                       send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan, msg);
+                                       send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan, msg);
                                }
                                e.insert(chan);
                        }
@@ -4540,20 +4614,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
-                                       if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-                                               let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                       // monitor update contained within `shutdown_finish` was applied.
-                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                               shutdown_finish.0.take();
+                                       match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+                                               ChannelMonitorUpdateStatus::Completed => {},
+                                               e => {
+                                                       let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+                                                       if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+                                                               // We weren't able to watch the channel to begin with, so no updates should be made on
+                                                               // it. Previously, full_stack_target found an (unreachable) panic when the
+                                                               // monitor update contained within `shutdown_finish` was applied.
+                                                               if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+                                                                       shutdown_finish.0.take();
+                                                               }
                                                        }
-                                               }
-                                               return res
+                                                       return res
+                                               },
                                        }
                                        if let Some(msg) = channel_ready {
-                                               send_channel_ready!(channel_state.short_to_id, channel_state.pending_msg_events, chan.get(), msg);
+                                               send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg);
                                        }
                                        funding_tx
                                },
@@ -4624,13 +4701,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                        // Update the monitor with the shutdown script if necessary.
                                        if let Some(monitor_update) = monitor_update {
-                                               if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
-                                                       let (result, is_permanent) =
-                                                               handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-                                                       if is_permanent {
-                                                               remove_channel!(self, channel_state, chan_entry);
-                                                               break result;
-                                                       }
+                                               let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+                                               let (result, is_permanent) =
+                                                       handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+                                               if is_permanent {
+                                                       remove_channel!(self, channel_state, chan_entry);
+                                                       break result;
                                                }
                                        }
 
@@ -4647,7 +4723,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
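+                       // 0x4000|8 is BOLT 4's `permanent_channel_failure`; the channel is
+                       // closing, so these HTLCs can never be resolved through it.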
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+                       self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                let _ = handle_error!(self, result, *counterparty_node_id);
@@ -4708,7 +4785,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+               let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
 
                match channel_state.by_id.entry(msg.channel_id) {
@@ -4817,9 +4895,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                },
                                                Ok(res) => res
                                        };
-                               if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                                       return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+                               let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+                                       return Err(e);
                                }
+
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                        node_id: counterparty_node_id.clone(),
                                        msg: revoke_and_ack,
@@ -4848,12 +4928,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
                        let mut forward_event = None;
                        if !pending_forwards.is_empty() {
-                               let mut channel_state = self.channel_state.lock().unwrap();
-                               if channel_state.forward_htlcs.is_empty() {
+                               let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+                               if forward_htlcs.is_empty() {
                                        forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
                                }
                                for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
-                                       match channel_state.forward_htlcs.entry(match forward_info.routing {
+                                       match forward_htlcs.entry(match forward_info.routing {
                                                        PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
                                                        PendingHTLCRouting::Receive { .. } => 0,
                                                        PendingHTLCRouting::ReceiveKeysend { .. } => 0,
@@ -4891,26 +4971,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+                                       let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
                                        let raa_updates = break_chan_entry!(self,
                                                chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
                                        htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
-                                       if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
-                                               if was_frozen_for_monitor {
-                                                       assert!(raa_updates.commitment_update.is_none());
-                                                       assert!(raa_updates.accepted_htlcs.is_empty());
-                                                       assert!(raa_updates.failed_htlcs.is_empty());
-                                                       assert!(raa_updates.finalized_claimed_htlcs.is_empty());
-                                                       break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
-                                               } else {
-                                                       if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
-                                                                       RAACommitmentOrder::CommitmentFirst, false,
-                                                                       raa_updates.commitment_update.is_some(), false,
-                                                                       raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
-                                                                       raa_updates.finalized_claimed_htlcs) {
-                                                               break Err(e);
-                                                       } else { unreachable!(); }
-                                               }
+                                       let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
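+                                       // If we were already paused waiting on a monitor update, the new
+                                       // update should not complete and `revoke_and_ack` should not have
+                                       // released any new HTLC state; the asserts below enforce this.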
+                                       if was_paused_for_mon_update {
+                                               assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+                                               assert!(raa_updates.commitment_update.is_none());
+                                               assert!(raa_updates.accepted_htlcs.is_empty());
+                                               assert!(raa_updates.failed_htlcs.is_empty());
+                                               assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+                                               break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+                                       }
+                                       if update_res != ChannelMonitorUpdateStatus::Completed {
+                                               if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan,
+                                                               RAACommitmentOrder::CommitmentFirst, false,
+                                                               raa_updates.commitment_update.is_some(), false,
+                                                               raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+                                                               raa_updates.finalized_claimed_htlcs) {
+                                                       break Err(e);
+                                               } else { unreachable!(); }
                                        }
                                        if let Some(updates) = raa_updates.commitment_update {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -4933,7 +5014,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                short_channel_id, channel_outpoint)) =>
                        {
                                for failure in pending_failures.drain(..) {
-                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+                                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+                                       self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
                                }
                                self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
                                self.finalize_claims(finalized_claim_htlcs);
@@ -4988,7 +5070,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
-               let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+               let chan_id = match channel_state.short_to_chan_info.get(&msg.contents.short_channel_id) {
                        Some((_cp_id, chan_id)) => chan_id.clone(),
                        None => {
                                // It's not a local channel
@@ -5011,6 +5093,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if were_node_one == msg_from_node_one {
                                        return Ok(NotifyOption::SkipPersist);
                                } else {
+                                       log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
                                        try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
                                }
                        },
@@ -5080,7 +5163,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
-               for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+               for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
                        for monitor_event in monitor_events.drain(..) {
                                match monitor_event {
                                        MonitorEvent::HTLCEvent(htlc_update) => {
@@ -5089,7 +5172,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
                                                } else {
                                                        log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
-                                                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+                                                       let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+                                                       self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                                                }
                                        },
                                        MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@ -5120,7 +5204,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        });
                                                }
                                        },
-                                       MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
+                                       MonitorEvent::Completed { funding_txo, monitor_update_id } => {
                                                self.channel_monitor_updated(&funding_txo, monitor_update_id);
                                        },
                                }
@@ -5158,7 +5242,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
@@ -5172,22 +5256,25 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                        ));
                                                }
                                                if let Some((commitment_update, monitor_update)) = commitment_opt {
-                                                       if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
-                                                               has_monitor_update = true;
-                                                               let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
-                                                               handle_errors.push((chan.get_counterparty_node_id(), res));
-                                                               if close_channel { return false; }
-                                                       } else {
-                                                               pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                                       node_id: chan.get_counterparty_node_id(),
-                                                                       updates: commitment_update,
-                                                               });
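+                                                       // `update_channel` now returns a `ChannelMonitorUpdateStatus`
+                                                       // rather than a `Result`; only `Completed` lets us send the
+                                                       // commitment update now, any other status is routed through
+                                                       // `handle_monitor_update_res!` below.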
+                                                       match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+                                                               ChannelMonitorUpdateStatus::Completed => {
+                                                                       pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                                               node_id: chan.get_counterparty_node_id(),
+                                                                               updates: commitment_update,
+                                                                       });
+                                                               },
+                                                               e => {
+                                                                       has_monitor_update = true;
+                                                                       let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+                                                                       handle_errors.push((chan.get_counterparty_node_id(), res));
+                                                                       if close_channel { return false; }
+                                                               },
                                                        }
                                                }
                                                true
                                        },
                                        Err(e) => {
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                // ChannelClosed event is generated by handle_error for us
                                                !close_channel
@@ -5218,7 +5305,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let by_id = &mut channel_state.by_id;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
 
                        by_id.retain(|channel_id, chan| {
@@ -5243,13 +5330,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
                                                        log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
                                                        self.tx_broadcaster.broadcast_transaction(&tx);
-                                                       update_maps_on_chan_removal!(self, short_to_id, chan);
+                                                       update_maps_on_chan_removal!(self, short_to_chan_info, chan);
                                                        false
                                                } else { true }
                                        },
                                        Err(e) => {
                                                has_update = true;
-                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_chan_info, chan, channel_id);
                                                handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
                                                !close_channel
                                        }
@@ -5444,7 +5531,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                loop {
                        let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
                        // Ensure the generated scid doesn't conflict with a real channel.
-                       match channel_state.short_to_id.entry(scid_candidate) {
+                       match channel_state.short_to_chan_info.entry(scid_candidate) {
                                hash_map::Entry::Occupied(_) => continue,
                                hash_map::Entry::Vacant(_) => return scid_candidate
                        }
@@ -5532,10 +5619,6 @@ where
        ///
        /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
        /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
-       ///
-       /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
-       /// when processed, an [`EventHandler`] must be able to handle previously seen events when
-       /// restarting from an old state.
        fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
                        let mut result = NotifyOption::SkipPersist;
@@ -5654,7 +5737,6 @@ where
                                }
                        }
                }
-               max_time!(self.last_node_announcement_serial);
                max_time!(self.highest_seen_timestamp);
                let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
                payment_secrets.retain(|_, inbound_payment| {
@@ -5679,7 +5761,7 @@ where
 
        fn get_relevant_txids(&self) -> Vec<Txid> {
                let channel_state = self.channel_state.lock().unwrap();
-               let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+               let mut res = Vec::with_capacity(channel_state.short_to_chan_info.len());
                for chan in channel_state.by_id.values() {
                        if let Some(funding_txo) = chan.get_funding_txo() {
                                res.push(funding_txo.txid);
@@ -5722,7 +5804,7 @@ where
                {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
                                let res = f(channel);
@@ -5731,10 +5813,10 @@ where
                                                let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
                                                timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
                                                        failure_code, data,
-                                               }));
+                                               }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
                                        }
                                        if let Some(channel_ready) = channel_ready_opt {
-                                               send_channel_ready!(short_to_id, pending_msg_events, channel, channel_ready);
+                                               send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
                                                if channel.is_usable() {
                                                        log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
                                                        if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
@@ -5767,18 +5849,19 @@ where
                                        if channel.is_our_channel_ready() {
                                                if let Some(real_scid) = channel.get_short_channel_id() {
                                                        // If we sent a 0conf channel_ready, and now have an SCID, we add it
-                                                       // to the short_to_id map here. Note that we check whether we can relay
-                                                       // using the real SCID at relay-time (i.e. enforce option_scid_alias
-                                                       // then), and if the funding tx is ever un-confirmed we force-close the
-                                                       // channel, ensuring short_to_id is always consistent.
-                                                       let scid_insert = short_to_id.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                                       // to the short_to_chan_info map here. Note that we check whether we
+                                                       // can relay using the real SCID at relay-time (i.e.
+                                                       // enforce option_scid_alias then), and if the funding tx is ever
+                                                       // un-confirmed we force-close the channel, ensuring short_to_chan_info
+                                                       // is always consistent.
+                                                       let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
                                                        assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
                                                                "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
                                                                fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
                                                }
                                        }
                                } else if let Err(reason) = res {
-                                       update_maps_on_chan_removal!(self, short_to_id, channel);
+                                       update_maps_on_chan_removal!(self, short_to_chan_info, channel);
                                        // It looks like our counterparty went on-chain or the funding transaction
                                        // was reorged out of the main chain. Close the channel.
                                        failed_channels.push(channel.force_shutdown(true));
@@ -5811,10 +5894,11 @@ where
                                                if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
                                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
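+                                                       // 0x4000 | 15 is `incorrect_or_unknown_payment_details` per
+                                                       // BOLT 4; its data carries the HTLC amount and current block
+                                                       // height as assembled above.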
                                                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
                                                                failure_code: 0x4000 | 15,
                                                                data: htlc_msat_height_data
-                                                       }));
+                                                       }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
                                                        false
                                                } else { true }
                                        });
@@ -5825,8 +5909,8 @@ where
 
                self.handle_init_event_channel_failures(failed_channels);
 
-               for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
-                       self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+               for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+                       self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
                }
        }
 
@@ -5848,12 +5932,16 @@ where
                self.persistence_notifier.wait()
        }
 
+       /// Gets a [`Future`] that completes when a persistable update is available. Note that
+       /// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and
+       /// should instead register actions to be taken later.
+       pub fn get_persistable_update_future(&self) -> Future {
+               self.persistence_notifier.get_future()
+       }
+
        #[cfg(any(test, feature = "_test_utils"))]
        pub fn get_persistence_condvar_value(&self) -> bool {
-               let mutcond = &self.persistence_notifier.persistence_lock;
-               let &(ref mtx, _) = mutcond;
-               let guard = mtx.lock().unwrap();
-               *guard
+               self.persistence_notifier.notify_pending()
        }
 
        /// Gets the latest best block which was connected either via the [`chain::Listen`] or
@@ -5969,14 +6057,14 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
-                       let short_to_id = &mut channel_state.short_to_id;
+                       let short_to_chan_info = &mut channel_state.short_to_chan_info;
                        log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
                                log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
                        channel_state.by_id.retain(|_, chan| {
                                if chan.get_counterparty_node_id() == *counterparty_node_id {
                                        chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
                                        if chan.is_shutdown() {
-                                               update_maps_on_chan_removal!(self, short_to_id, chan);
+                                               update_maps_on_chan_removal!(self, short_to_chan_info, chan);
                                                self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
                                                return false;
                                        } else {
@@ -5998,8 +6086,8 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
+                                       &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
-                                       &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
                                        &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
                                        &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
@@ -6019,7 +6107,12 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                }
        }
 
-       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
+       fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
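+               // `peer_connected` can now reject a peer: returning `Err` here causes the caller
+               // (e.g. `PeerManager`) to disconnect the peer. We require `static_remote_key`
+               // support from all peers.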
+               if !init_msg.features.supports_static_remote_key() {
+                       log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+                       return Err(());
+               }
+
                log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
@@ -6042,7 +6135,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                let channel_state = &mut *channel_state_lock;
                let pending_msg_events = &mut channel_state.pending_msg_events;
                channel_state.by_id.retain(|_, chan| {
-                       if chan.get_counterparty_node_id() == *counterparty_node_id {
+                       let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
                                if !chan.have_received_message() {
                                        // If we created this (outbound) channel while we were disconnected from the
                                        // peer we probably failed to send the open_channel message, which is now
@@ -6056,9 +6149,21 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                                        });
                                        true
                                }
-                       } else { true }
+                       } else { true };
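+                       // For channels with *other* peers, hand the newly connected peer our cached
+                       // channel_announcement (if we have one) along with a fresh channel_update.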
+                       if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+                               if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
+                                       if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
+                                               pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
+                                                       node_id: *counterparty_node_id,
+                                                       msg, update_msg,
+                                               });
+                                       }
+                               }
+                       }
+                       retain
                });
                //TODO: Also re-broadcast announcement_signatures
+               Ok(())
        }
 
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
@@ -6093,77 +6198,57 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
                        let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
                }
        }
-}
-
-/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
-struct PersistenceNotifier {
-       /// Users won't access the persistence_lock directly, but rather wait on its bool using
-       /// `wait_timeout` and `wait`.
-       persistence_lock: (Mutex<bool>, Condvar),
-}
 
-impl PersistenceNotifier {
-       fn new() -> Self {
-               Self {
-                       persistence_lock: (Mutex::new(false), Condvar::new()),
-               }
+       fn provided_node_features(&self) -> NodeFeatures {
+               provided_node_features()
        }
 
-       fn wait(&self) {
-               loop {
-                       let &(ref mtx, ref cvar) = &self.persistence_lock;
-                       let mut guard = mtx.lock().unwrap();
-                       if *guard {
-                               *guard = false;
-                               return;
-                       }
-                       guard = cvar.wait(guard).unwrap();
-                       let result = *guard;
-                       if result {
-                               *guard = false;
-                               return
-                       }
-               }
+       fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
+               provided_init_features()
        }
+}
 
-       #[cfg(any(test, feature = "std"))]
-       fn wait_timeout(&self, max_wait: Duration) -> bool {
-               let current_time = Instant::now();
-               loop {
-                       let &(ref mtx, ref cvar) = &self.persistence_lock;
-                       let mut guard = mtx.lock().unwrap();
-                       if *guard {
-                               *guard = false;
-                               return true;
-                       }
-                       guard = cvar.wait_timeout(guard, max_wait).unwrap().0;
-                       // Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
-                       // desired wait time has actually passed, and if not then restart the loop with a reduced wait
-                       // time. Note that this logic can be highly simplified through the use of
-                       // `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
-                       // 1.42.0.
-                       let elapsed = current_time.elapsed();
-                       let result = *guard;
-                       if result || elapsed >= max_wait {
-                               *guard = false;
-                               return result;
-                       }
-                       match max_wait.checked_sub(elapsed) {
-                               None => return result,
-                               Some(_) => continue
-                       }
-               }
-       }
+/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub fn provided_node_features() -> NodeFeatures {
+       provided_init_features().to_context()
+}
 
-       // Signal to the ChannelManager persister that there are updates necessitating persisting to disk.
-       fn notify(&self) {
-               let &(ref persist_mtx, ref cnd) = &self.persistence_lock;
-               let mut persistence_lock = persist_mtx.lock().unwrap();
-               *persistence_lock = true;
-               mem::drop(persistence_lock);
-               cnd.notify_all();
-       }
+/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+///
+/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom
+/// invoice" or not. Thus, this method is not part of the public API and is only compiled for tests.
+#[cfg(any(feature = "_test_utils", test))]
+pub fn provided_invoice_features() -> InvoiceFeatures {
+       provided_init_features().to_context()
+}
+
+/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub fn provided_channel_features() -> ChannelFeatures {
+       provided_init_features().to_context()
+}
+
+/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub fn provided_init_features() -> InitFeatures {
+       // Note that if new features are added here which other peers may (eventually) require, we
+       // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
+       // ErroringMessageHandler.
+       let mut features = InitFeatures::empty();
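+       // `variable_length_onion`, `static_remote_key` and `payment_secret` are required of our
+       // peers; everything else below is advertised as optional.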
+       features.set_data_loss_protect_optional();
+       features.set_upfront_shutdown_script_optional();
+       features.set_variable_length_onion_required();
+       features.set_static_remote_key_required();
+       features.set_payment_secret_required();
+       features.set_basic_mpp_optional();
+       features.set_wumbo_optional();
+       features.set_shutdown_any_segwit_optional();
+       features.set_channel_type_optional();
+       features.set_scid_privacy_optional();
+       features.set_zero_conf_optional();
+       features
 }
 
 const SERIALIZATION_VERSION: u8 = 1;
@@ -6524,29 +6609,37 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        best_block.block_hash().write(writer)?;
                }
 
-               let channel_state = self.channel_state.lock().unwrap();
-               let mut unfunded_channels = 0;
-               for (_, channel) in channel_state.by_id.iter() {
-                       if !channel.is_funding_initiated() {
-                               unfunded_channels += 1;
+               {
+                       // Take the `channel_state` lock only temporarily, so we avoid creating a lock
+                       // order that would require taking the `forward_htlcs` lock after `channel_state`.
+                       let channel_state = self.channel_state.lock().unwrap();
+                       let mut unfunded_channels = 0;
+                       for (_, channel) in channel_state.by_id.iter() {
+                               if !channel.is_funding_initiated() {
+                                       unfunded_channels += 1;
+                               }
                        }
-               }
-               ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
-               for (_, channel) in channel_state.by_id.iter() {
-                       if channel.is_funding_initiated() {
-                               channel.write(writer)?;
+                       ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+                       for (_, channel) in channel_state.by_id.iter() {
+                               if channel.is_funding_initiated() {
+                                       channel.write(writer)?;
+                               }
                        }
                }
 
-               (channel_state.forward_htlcs.len() as u64).write(writer)?;
-               for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
-                       short_channel_id.write(writer)?;
-                       (pending_forwards.len() as u64).write(writer)?;
-                       for forward in pending_forwards {
-                               forward.write(writer)?;
+               {
+                       let forward_htlcs = self.forward_htlcs.lock().unwrap();
+                       (forward_htlcs.len() as u64).write(writer)?;
+                       for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
+                               short_channel_id.write(writer)?;
+                               (pending_forwards.len() as u64).write(writer)?;
+                               for forward in pending_forwards {
+                                       forward.write(writer)?;
+                               }
                        }
                }
 
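+               // Re-take the `channel_state` lock only now, after the `forward_htlcs` lock above
+               // has been dropped, so we never hold both locks at once.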
+               let channel_state = self.channel_state.lock().unwrap();
                let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
                (channel_state.claimable_htlcs.len() as u64).write(writer)?;
                for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
@@ -6586,7 +6679,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        }
                }
 
-               (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
+               // Prior to 0.0.111 we tracked node_announcement serials here; that now happens in
+               // `PeerManager`, so we simply write the `highest_seen_timestamp` twice, which is
+               // likely to be identical to the old serial anyway.
+               (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
                (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
 
                (pending_inbound_payments.len() as u64).write(writer)?;
@@ -6633,6 +6729,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        (5, self.our_network_pubkey, required),
                        (7, self.fake_scid_rand_bytes, required),
                        (9, htlc_purposes, vec_type),
+                       (11, self.probing_cookie_secret, required),
                });
 
                Ok(())
@@ -6776,7 +6873,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let channel_count: u64 = Readable::read(reader)?;
                let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
                let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
-               let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+               let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                let mut channel_closures = Vec::new();
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
@@ -6816,7 +6914,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                } else {
                                        log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
-                                               short_to_id.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                               short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+                                       }
+                                       if channel.is_funding_initiated() {
+                                               id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
                                        }
                                        by_id.insert(channel.channel_id(), channel);
                                }
@@ -6900,7 +7001,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
-               let last_node_announcement_serial: u32 = Readable::read(reader)?;
+               let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
 
                let pending_inbound_payment_count: u64 = Readable::read(reader)?;
@@ -6929,6 +7030,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut pending_outbound_payments = None;
                let mut received_network_pubkey: Option<PublicKey> = None;
                let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
+               let mut probing_cookie_secret: Option<[u8; 32]> = None;
                let mut claimable_htlc_purposes = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
@@ -6936,11 +7038,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        (5, received_network_pubkey, option),
                        (7, fake_scid_rand_bytes, option),
                        (9, claimable_htlc_purposes, vec_type),
+                       (11, probing_cookie_secret, option),
                });
                if fake_scid_rand_bytes.is_none() {
                        fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
                }
 
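+               // `probing_cookie_secret` was not written by older versions; when reading such a
+               // serialization, generate a fresh secret instead.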
+               if probing_cookie_secret.is_none() {
+                       probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+               }
+
                if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
                        pending_outbound_payments = Some(pending_outbound_payments_compat);
                } else if pending_outbound_payments.is_none() {
@@ -7075,7 +7182,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                return Err(DecodeError::InvalidValue);
                        }
                        if chan.is_usable() {
-                               if short_to_id.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+                               if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
                                        // Note that in rare cases it's possible to hit this while reading an older
                                        // channel if we just happened to pick a colliding outbound alias above.
                                        log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
@@ -7084,6 +7191,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
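+               // `LowerBoundedFeeEstimator` wraps the user-supplied fee estimator to enforce a
+               // minimum sane feerate floor on its estimates.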
+               let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
+
                for (_, monitor) in args.channel_monitors.iter() {
                        for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
                                if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
@@ -7112,7 +7221,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                        channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
                                                }
                                                if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
-                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &args.fee_estimator, &args.logger);
+                                                       previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
                                                }
                                        }
                                        pending_events_read.push(events::Event::PaymentClaimed {
@@ -7126,7 +7235,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                let channel_manager = ChannelManager {
                        genesis_hash,
-                       fee_estimator: args.fee_estimator,
+                       fee_estimator: bounded_fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
 
@@ -7134,8 +7243,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
-                               short_to_id,
-                               forward_htlcs,
+                               short_to_chan_info,
                                claimable_htlcs,
                                pending_msg_events: Vec::new(),
                        }),
@@ -7143,14 +7251,17 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        pending_inbound_payments: Mutex::new(pending_inbound_payments),
                        pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 
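+                       // `forward_htlcs` now lives outside `ChannelHolder`, behind its own `Mutex`,
+                       // matching the serialization change above.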
+                       forward_htlcs: Mutex::new(forward_htlcs),
                        outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
+                       id_to_peer: Mutex::new(id_to_peer),
                        fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
 
+                       probing_cookie_secret: probing_cookie_secret.unwrap(),
+
                        our_network_key,
                        our_network_pubkey,
                        secp_ctx,
 
-                       last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
                        highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
 
                        per_peer_state: RwLock::new(per_peer_state),
@@ -7158,7 +7269,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        pending_events: Mutex::new(pending_events_read),
                        pending_background_events: Mutex::new(pending_background_events_read),
                        total_consistency_lock: RwLock::new(()),
-                       persistence_notifier: PersistenceNotifier::new(),
+                       persistence_notifier: Notifier::new(),
 
                        keys_manager: args.keys_manager,
                        logger: args.logger,
@@ -7166,7 +7277,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                };
 
                for htlc_source in failed_htlcs.drain(..) {
-                       channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
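+                       // Fail back any HTLCs on channels closed during deserialization, now with the
+                       // counterparty node id and channel id attached so the failure destination can
+                       // be reported.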
+                       let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+                       let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+                       channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
                }
 
                //TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -7183,66 +7296,16 @@ mod tests {
        use core::time::Duration;
        use core::sync::atomic::Ordering;
        use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-       use ln::channelmanager::{PaymentId, PaymentSendFailure};
-       use ln::channelmanager::inbound_payment;
-       use ln::features::InitFeatures;
+       use ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure};
        use ln::functional_test_utils::*;
        use ln::msgs;
        use ln::msgs::ChannelMessageHandler;
        use routing::router::{PaymentParameters, RouteParameters, find_route};
        use util::errors::APIError;
-       use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+       use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use util::test_utils;
        use chain::keysinterface::KeysInterface;
 
-       #[cfg(feature = "std")]
-       #[test]
-       fn test_wait_timeout() {
-               use ln::channelmanager::PersistenceNotifier;
-               use sync::Arc;
-               use core::sync::atomic::AtomicBool;
-               use std::thread;
-
-               let persistence_notifier = Arc::new(PersistenceNotifier::new());
-               let thread_notifier = Arc::clone(&persistence_notifier);
-
-               let exit_thread = Arc::new(AtomicBool::new(false));
-               let exit_thread_clone = exit_thread.clone();
-               thread::spawn(move || {
-                       loop {
-                               let &(ref persist_mtx, ref cnd) = &thread_notifier.persistence_lock;
-                               let mut persistence_lock = persist_mtx.lock().unwrap();
-                               *persistence_lock = true;
-                               cnd.notify_all();
-
-                               if exit_thread_clone.load(Ordering::SeqCst) {
-                                       break
-                               }
-                       }
-               });
-
-               // Check that we can block indefinitely until updates are available.
-               let _ = persistence_notifier.wait();
-
-               // Check that the PersistenceNotifier will return after the given duration if updates are
-               // available.
-               loop {
-                       if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
-                               break
-                       }
-               }
-
-               exit_thread.store(true, Ordering::SeqCst);
-
-               // Check that the PersistenceNotifier will return after the given duration even if no updates
-               // are available.
-               loop {
-                       if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
-                               break
-                       }
-               }
-       }
-
        #[test]
        fn test_notify_limits() {
                // Check that a few cases which don't require the persistence of a new ChannelManager,
@@ -7258,7 +7321,7 @@ mod tests {
                assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
                assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
 
-               let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
                // We check that the channel info nodes have doesn't change too early, even though we try
                // to connect messages with new values
@@ -7329,7 +7392,7 @@ mod tests {
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
 
                // First, send a partial MPP payment.
                let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
@@ -7354,7 +7417,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7447,7 +7510,7 @@ mod tests {
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+               create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
                let scorer = test_utils::TestScorer::with_penalty(0);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 
@@ -7474,8 +7537,10 @@ mod tests {
                nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               // We have to forward pending HTLCs twice - the first attempt forwards the payment
+               // onwards (and fails), the second processes the resulting failure and fails the HTLC
+               // backwards.
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7516,7 +7581,7 @@ mod tests {
                check_added_monitors!(nodes[1], 0);
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
                check_added_monitors!(nodes[1], 1);
                let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(updates.update_add_htlcs.is_empty());
@@ -7543,10 +7608,10 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
 
-               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::for_keysend(payee_pubkey),
                        final_value_msat: 10000,
@@ -7587,10 +7652,10 @@ mod tests {
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
                let payee_pubkey = nodes[1].node.get_our_node_id();
-               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
-               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+               nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+               nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
 
-               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+               let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::for_keysend(payee_pubkey),
                        final_value_msat: 10000,
@@ -7629,10 +7694,10 @@ mod tests {
                let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
                let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
-               let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-               let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-               let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-               let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+               let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+               let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+               let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+               let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
 
                // Marshall an MPP route.
                let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
@@ -7680,6 +7745,119 @@ mod tests {
                // Check that using the original payment hash succeeds.
                assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
        }
+
+       #[test]
+       fn test_id_to_peer_coverage() {
+               // Test that the `ChannelManager::id_to_peer` map contains channels which have been assigned
+               // a `channel_id` (i.e. have had the funding tx created), and that they are removed once
+               // the channel is successfully closed.
+               let chanmon_cfgs = create_chanmon_cfgs(2);
+               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+               nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+               let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel);
+               let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel);
+
+               let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
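+               // The funding output index here is 0, so the channel_id (the funding txid XORed
+               // with the funding output index) equals the raw txid bytes.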
+               let channel_id = &tx.txid().into_inner();
+               {
+                       // Ensure that the `id_to_peer` map is empty until either party has received the
+                       // funding transaction and has the real `channel_id`.
+                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
+
+               nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+               {
+                       // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon
+                       // as it has the funding transaction.
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
+
+               let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+               {
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon
+                       // as it has the funding transaction.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+               check_added_monitors!(nodes[1], 1);
+               let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+               check_added_monitors!(nodes[0], 1);
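+               // Complete the funding flow by exchanging `channel_ready` and announcing the channel,
+               // leaving it fully open before the cooperative close below.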
+               let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+               let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+               update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
+
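+               // Initiate a cooperative close and exchange `shutdown` messages, after which the
+               // nodes negotiate a fee for the closing transaction via `closing_signed`.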
+               nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+               let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown);
+
+               let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
+               {
+                       // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+                       // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
+                       // fee for the closing transaction has been negotiated, and each party has the other
+                       // party's signature for the closing transaction at the negotiated fee).
+                       let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_0_lock.len(), 1);
+                       assert!(nodes_0_lock.contains_key(channel_id));
+
+                       // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
+                       // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
+                       // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
+                       // kept in `nodes[1]`'s `id_to_peer` map.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+               {
+                       // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
+                       // therefore has all it needs to fully close the channel (both signatures for the
+                       // closing transaction).
+                       // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+                       // fully closed by `nodes[0]`.
+                       assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+
+                       // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+                       // doesn't have `nodes[0]`'s signature for the closing transaction yet.
+                       let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+                       assert_eq!(nodes_1_lock.len(), 1);
+                       assert!(nodes_1_lock.contains_key(channel_id));
+               }
+
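+               // `nodes[0]` now broadcasts its closing transaction and sends `nodes[1]` its
+               // `closing_signed` for the agreed-upon fee.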
+               let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
+               {
+                       // Assert that the channel has now been removed from both parties' `id_to_peer` maps once
+                       // they both have everything required to fully close the channel.
+                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+               }
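+               // `nodes[1]` likewise broadcasts its closing transaction; no further `closing_signed`
+               // is expected, hence the `_none`.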
+               let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+
+               check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+               check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+       }
 }
 
 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
@@ -7687,7 +7865,7 @@ pub mod bench {
        use chain::Listen;
        use chain::chainmonitor::{ChainMonitor, Persist};
        use chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
-       use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
+       use ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
        use ln::features::{InitFeatures, InvoiceFeatures};
        use ln::functional_test_utils::*;
        use ln::msgs::{ChannelMessageHandler, Init};
@@ -7699,7 +7877,7 @@ pub mod bench {
 
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
-       use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+       use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
 
        use sync::{Arc, Mutex};
 
@@ -7753,15 +7931,15 @@ pub mod bench {
                });
                let node_b_holder = NodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
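+               // `peer_connected` now returns a `Result`, so unwrap to surface any setup failure.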
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
-               node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
-               node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+               node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+               node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
 
                let tx;
                if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
-                       tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
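+                       // `lock_time` now takes a `PackedLockTime`; `ZERO` leaves the funding
+                       // transaction free of any locktime.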
+                       tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                                value: 8_000_000, script_pubkey: output_script,
                        }]};
                        node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
@@ -7773,7 +7951,7 @@ pub mod bench {
                assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
 
                let block = Block {
-                       header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
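+                       // The merkle root is never validated here, so an all-zeros `TxMerkleNode`
+                       // placeholder suffices.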
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![tx],
                };
                Listen::block_connected(&node_a, &block, 1);
@@ -7801,7 +7979,7 @@ pub mod bench {
                        ($node_a: expr, $node_b: expr) => {
                                let usable_channels = $node_a.list_usable_channels();
                                let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
-                                       .with_features(InvoiceFeatures::known());
+                                       .with_features(channelmanager::provided_invoice_features());
                                let scorer = test_utils::TestScorer::with_penalty(0);
                                let seed = [3u8; 32];
                                let keys_manager = KeysManager::new(&seed, 42, 42);