Merge pull request #2156 from alecchendev/2023-04-mpp-keysend
[rust-lightning] / lightning / src / ln / channelmanager.rs
index f489003029afd847f5314fb3dba4e2a721f57c4b..7c065f1ba44c4d956f68d791f52ad75c8135c74e 100644 (file)
@@ -19,7 +19,7 @@
 
 use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
-use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::blockdata::constants::{genesis_block, ChainHash};
 use bitcoin::network::constants::Network;
 
 use bitcoin::hashes::Hash;
@@ -40,7 +40,7 @@ use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, Messa
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
+use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch};
 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
 #[cfg(any(feature = "_test_utils", test))]
 use crate::ln::features::InvoiceFeatures;
@@ -56,7 +56,7 @@ use crate::ln::outbound_payment;
 use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
 use crate::ln::wire::Encode;
 use crate::sign::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
-use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
 use crate::util::wakers::{Future, Notifier};
 use crate::util::scid_utils::fake_scid;
 use crate::util::string::UntrustedString;
@@ -112,6 +112,8 @@ pub(super) enum PendingHTLCRouting {
                phantom_shared_secret: Option<[u8; 32]>,
        },
        ReceiveKeysend {
+               /// This was added in 0.0.116 and will break deserialization on downgrades.
+               payment_data: Option<msgs::FinalOnionHopData>,
                payment_preimage: PaymentPreimage,
                payment_metadata: Option<Vec<u8>>,
                incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
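
For reference, a minimal construction sketch of the variant with its new field (all values here are placeholders, not taken from this diff; `FinalOnionHopData` carries the sender-provided `payment_secret` and `total_msat` used to correlate the parts of an MPP keysend):

    let routing = PendingHTLCRouting::ReceiveKeysend {
        // Present only when the sender included payment_data alongside the
        // keysend preimage, i.e. for MPP keysend.
        payment_data: Some(msgs::FinalOnionHopData { payment_secret, total_msat }),
        payment_preimage,
        payment_metadata: None,
        incoming_cltv_expiry: outgoing_cltv_value,
    };
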
@@ -359,8 +361,6 @@ pub enum FailureCode {
        IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
-
 /// Error type returned across the peer_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
 /// immediately (ie with no further calls on it made). Thus, this step happens inside a
@@ -497,15 +497,34 @@ struct ClaimablePayments {
        pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
 }
 
-/// Events which we process internally but cannot be procsesed immediately at the generation site
-/// for some reason. They are handled in timer_tick_occurred, so may be processed with
-/// quite some time lag.
+/// Events which we process internally but cannot be processed immediately at the generation site,
+/// usually because we're running pre-full-init. They are handled immediately once we detect we are
+/// running normally, and specifically must be processed before any other non-background
+/// [`ChannelMonitorUpdate`]s are applied.
 enum BackgroundEvent {
-       /// Handle a ChannelMonitorUpdate
+       /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+       /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the maybe-non-closing variant needs a public
+       /// key to handle channel resumption, whereas if the channel has been force-closed we do not
+       /// need the counterparty node_id.
+       ///
+       /// Note that any such events are lost on shutdown, so in general they must be updates which
+       /// are regenerated on startup.
+       ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
+       /// channel to continue normal operation.
+       ///
+       /// In general this should be used rather than
+       /// [`Self::ClosingMonitorUpdateRegeneratedOnStartup`]; however, in cases where the
+       /// `counterparty_node_id` is not available because the channel has closed from a
+       /// [`ChannelMonitor`] error, the other variant is acceptable.
        ///
        /// Note that any such events are lost on shutdown, so in general they must be updates which
        /// are regenerated on startup.
-       MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+       MonitorUpdateRegeneratedOnStartup {
+               counterparty_node_id: PublicKey,
+               funding_txo: OutPoint,
+               update: ChannelMonitorUpdate
+       },
 }
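
A rough sketch of how these variants might be drained at startup (this is not the actual `process_background_events` body, which is outside this hunk; `chain_monitor` is the `ChannelManager`'s `chain::Watch` implementation):

    for event in self.pending_background_events.lock().unwrap().drain(..) {
        match event {
            // Force-close updates regenerated on startup: no counterparty
            // node_id is required, just hand the update to the monitor.
            BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
                let _ = self.chain_monitor.update_channel(funding_txo, &update);
            },
            // Maybe-non-closing updates carry the counterparty node_id so the
            // channel can be unblocked once the update completes.
            BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
                let _ = self.chain_monitor.update_channel(funding_txo, &update);
                // ... then resume normal operation of the channel with `counterparty_node_id` (omitted).
            },
        }
    }
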
 
 #[derive(Debug)]
@@ -515,13 +534,31 @@ pub(crate) enum MonitorUpdateCompletionAction {
        /// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
        /// event can be generated.
        PaymentClaimed { payment_hash: PaymentHash },
-       /// Indicates an [`events::Event`] should be surfaced to the user.
-       EmitEvent { event: events::Event },
+       /// Indicates an [`events::Event`] should be surfaced to the user and, possibly, that the
+       /// operation of another channel should be resumed.
+       ///
+       /// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
+       /// from completing a monitor update which removes the payment preimage until the inbound edge
+       /// completes a monitor update containing the payment preimage. In that case, after the inbound
+       /// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
+       /// outbound edge.
+       EmitEventAndFreeOtherChannel {
+               event: events::Event,
+               downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+       },
 }
 
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
        (0, PaymentClaimed) => { (0, payment_hash, required) },
-       (2, EmitEvent) => { (0, event, upgradable_required) },
+       (2, EmitEventAndFreeOtherChannel) => {
+               (0, event, upgradable_required),
+               // LDK prior to 0.0.116 did not have this field as the monitor update application order was
+               // required by clients. If we downgrade to something prior to 0.0.116 this may result in
+               // monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+               // support async monitor updates even in LDK 0.0.116 and once we do we'll require no
+               // downgrades to prior versions.
+               (1, downstream_counterparty_and_funding_outpoint, option),
+       },
 );
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -538,6 +575,36 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
        };
 );
 
+#[derive(Clone, PartialEq, Eq, Debug)]
+/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
+/// the blocked action here. See enum variants for more info.
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+       /// A forwarded payment was claimed. We block the downstream channel completing its monitor
+       /// update which removes the HTLC preimage until the upstream channel has gotten the preimage
+       /// durably to disk.
+       ForwardedPaymentInboundClaim {
+               /// The upstream channel ID (i.e. the inbound edge).
+               channel_id: [u8; 32],
+               /// The HTLC ID on the inbound edge.
+               htlc_id: u64,
+       },
+}
+
+impl RAAMonitorUpdateBlockingAction {
+       #[allow(unused)]
+       fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+               Self::ForwardedPaymentInboundClaim {
+                       channel_id: prev_hop.outpoint.to_channel_id(),
+                       htlc_id: prev_hop.htlc_id,
+               }
+       }
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+       (0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
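
A minimal sketch of how this ties into [`MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel`] above when a forwarded HTLC is claimed, using the `actions_blocking_raa_monitor_updates` map added to `PeerState` below (variable names and the surrounding claim flow are illustrative only):

    // Upstream (inbound) edge: note that the downstream channel's RAA monitor
    // update is blocked until the preimage is durably persisted upstream.
    let blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop);
    peer_state.actions_blocking_raa_monitor_updates
        .entry(downstream_channel_id)
        .or_insert_with(Vec::new)
        .push(blocker.clone());

    // Once the upstream ChannelMonitorUpdate completes, surface the event and
    // free the downstream channel in a single completion action.
    let action = MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
        event: payment_forwarded_event, // an events::Event::PaymentForwarded built elsewhere
        downstream_counterparty_and_funding_outpoint:
            Some((downstream_node_id, downstream_funding_outpoint, blocker)),
    };
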
+
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
        /// `temporary_channel_id` or `channel_id` -> `channel`.
@@ -566,6 +633,11 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
        /// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
        /// duplicates do not occur, so such channels should fail without a monitor update completing.
        monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+       /// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+       /// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+       /// will remove a preimage that needs to be durably persisted in an upstream channel first), we
+       /// put an
+       /// entry here to note that the channel with the key's ID is blocked on a set of actions.
+       actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
        /// The peer is currently connected (i.e. we've seen a
        /// [`ChannelMessageHandler::peer_connected`] and no corresponding
        /// [`ChannelMessageHandler::peer_disconnected`].
@@ -645,40 +717,44 @@ pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
 /// This is not exported to bindings users as Arcs don't make sense in bindings
 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>, &'g L>;
 
+macro_rules! define_test_pub_trait { ($vis: vis) => {
 /// A trivial trait which describes any [`ChannelManager`] used in testing.
-#[cfg(any(test, feature = "_test_utils"))]
-pub trait AChannelManager {
-       type Watch: chain::Watch<Self::Signer>;
+$vis trait AChannelManager {
+       type Watch: chain::Watch<Self::Signer> + ?Sized;
        type M: Deref<Target = Self::Watch>;
-       type Broadcaster: BroadcasterInterface;
+       type Broadcaster: BroadcasterInterface + ?Sized;
        type T: Deref<Target = Self::Broadcaster>;
-       type EntropySource: EntropySource;
+       type EntropySource: EntropySource + ?Sized;
        type ES: Deref<Target = Self::EntropySource>;
-       type NodeSigner: NodeSigner;
+       type NodeSigner: NodeSigner + ?Sized;
        type NS: Deref<Target = Self::NodeSigner>;
-       type Signer: WriteableEcdsaChannelSigner;
-       type SignerProvider: SignerProvider<Signer = Self::Signer>;
+       type Signer: WriteableEcdsaChannelSigner + Sized;
+       type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
        type SP: Deref<Target = Self::SignerProvider>;
-       type FeeEstimator: FeeEstimator;
+       type FeeEstimator: FeeEstimator + ?Sized;
        type F: Deref<Target = Self::FeeEstimator>;
-       type Router: Router;
+       type Router: Router + ?Sized;
        type R: Deref<Target = Self::Router>;
-       type Logger: Logger;
+       type Logger: Logger + ?Sized;
        type L: Deref<Target = Self::Logger>;
        fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
 }
+} }
 #[cfg(any(test, feature = "_test_utils"))]
+define_test_pub_trait!(pub);
+#[cfg(not(any(test, feature = "_test_utils")))]
+define_test_pub_trait!(pub(crate));
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
 for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
-       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer> + Sized,
-       T::Target: BroadcasterInterface + Sized,
-       ES::Target: EntropySource + Sized,
-       NS::Target: NodeSigner + Sized,
-       SP::Target: SignerProvider + Sized,
-       F::Target: FeeEstimator + Sized,
-       R::Target: Router + Sized,
-       L::Target: Logger + Sized,
+       M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       T::Target: BroadcasterInterface,
+       ES::Target: EntropySource,
+       NS::Target: NodeSigner,
+       SP::Target: SignerProvider,
+       F::Target: FeeEstimator,
+       R::Target: Router,
+       L::Target: Logger,
 {
        type Watch = M::Target;
        type M = M;
@@ -964,7 +1040,18 @@ where
        pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
        /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
        pending_events_processor: AtomicBool,
+
+       /// If we are running during init (either directly during the deserialization method or in
+       /// block connection methods which run after deserialization but before normal operation) we
+       /// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
+       /// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
+       /// [`ChainMonitor`] and thus attempting to update it will fail or panic.
+       ///
+       /// Thus, we place them here to be handled as soon as possible once we are running normally.
+       ///
        /// See `ChannelManager` struct-level documentation for lock order requirements.
+       ///
+       /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
        pending_background_events: Mutex<Vec<BackgroundEvent>>,
        /// Used when we have to take a BIG lock to make sure everything is self-consistent.
        /// Essentially just when we're serializing ourselves out.
@@ -974,6 +1061,9 @@ where
        /// Notifier the lock contains sends out a notification when the lock is released.
        total_consistency_lock: RwLock<()>,
 
+       #[cfg(debug_assertions)]
+       background_events_processed_since_startup: AtomicBool,
+
        persistence_notifier: Notifier,
 
        entropy_source: ES,
@@ -1000,6 +1090,7 @@ pub struct ChainParameters {
 }
 
 #[derive(Copy, Clone, PartialEq)]
+#[must_use]
 enum NotifyOption {
        DoPersist,
        SkipPersist,
@@ -1023,10 +1114,20 @@ struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
 }
 
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
-       fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
-               PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+       fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+               let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
+               let _ = cm.get_cm().process_background_events(); // We always persist
+
+               PersistenceNotifierGuard {
+                       persistence_notifier: &cm.get_cm().persistence_notifier,
+                       should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+                       _read_guard: read_guard,
+               }
+
        }
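
The call-site pattern used throughout the rest of this change is then simply (sketch):

    // Takes total_consistency_lock for read, immediately processes any
    // startup-generated background events, and notifies the persistence waker
    // when the guard is dropped.
    let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
    // ... work that may generate ChannelMonitorUpdates ...
    // guard dropped here -> persistence notification fires
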
 
+       /// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
+       /// [`ChannelManager::process_background_events`] MUST be called first.
        fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
                let read_guard = lock.read().unwrap();
 
@@ -1274,8 +1375,14 @@ pub struct ChannelDetails {
        /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
        /// to use a limit as close as possible to the HTLC limit we can currently send.
        ///
-       /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
+       /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`],
+       /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`].
        pub next_outbound_htlc_limit_msat: u64,
+       /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of
+       /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than
+       /// an upper-bound. This is intended for use when routing, allowing us to ensure we pick a
+       /// route which is valid.
+       pub next_outbound_htlc_minimum_msat: u64,
        /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
        /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
        /// available for inclusion in new inbound HTLCs).
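
For example, a caller selecting a channel for an outbound hop can now check both bounds before committing to an amount (sketch; `desired_msat` and `chan` are placeholders):

    // A channel can carry a single HTLC of `desired_msat` only if the value
    // fits between the new lower bound and the existing upper bound.
    let usable = desired_msat >= chan.next_outbound_htlc_minimum_msat
        && desired_msat <= chan.next_outbound_htlc_limit_msat;
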
@@ -1395,6 +1502,7 @@ impl ChannelDetails {
                        inbound_capacity_msat: balance.inbound_capacity_msat,
                        outbound_capacity_msat: balance.outbound_capacity_msat,
                        next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
+                       next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
                        user_channel_id: channel.get_user_id(),
                        confirmations_required: channel.minimum_depth(),
                        confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
@@ -1690,6 +1798,9 @@ macro_rules! handle_new_monitor_update {
                // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
                // any case so that it won't deadlock.
                debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
+               #[cfg(debug_assertions)] {
+                       debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
+               }
                match $update_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
@@ -1736,6 +1847,10 @@ macro_rules! process_events_body {
                                // persists happen while processing monitor events.
                                let _read_guard = $self.total_consistency_lock.read().unwrap();
 
+                               // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
+                               // ensure any startup-generated background events are handled first.
+                               if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+
                                // TODO: This behavior should be documented. It's unintuitive that we query
                                // ChannelMonitors when clearing other events.
                                if $self.process_pending_monitor_events() {
@@ -1845,6 +1960,8 @@ where
                        pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
+                       #[cfg(debug_assertions)]
+                       background_events_processed_since_startup: AtomicBool::new(false),
                        persistence_notifier: Notifier::new(),
 
                        entropy_source,
@@ -1913,7 +2030,7 @@ where
                        return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
                debug_assert!(&self.total_consistency_lock.try_write().is_err());
 
@@ -2067,7 +2184,7 @@ where
        }
 
        fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
                let result: Result<(), _> = loop {
@@ -2197,7 +2314,7 @@ where
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
                        self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
                }
-               if let Some((funding_txo, monitor_update)) = monitor_update_option {
+               if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -2240,7 +2357,7 @@ where
        }
 
        fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
                        Ok(counterparty_node_id) => {
                                let per_peer_state = self.per_peer_state.read().unwrap();
@@ -2342,20 +2459,7 @@ where
                                });
                        },
                        msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage, payment_metadata } => {
-                               if payment_data.is_some() && keysend_preimage.is_some() {
-                                       return Err(ReceiveError {
-                                               err_code: 0x4000|22,
-                                               err_data: Vec::new(),
-                                               msg: "We don't support MPP keysend payments",
-                                       });
-                               } else if let Some(data) = payment_data {
-                                       PendingHTLCRouting::Receive {
-                                               payment_data: data,
-                                               payment_metadata,
-                                               incoming_cltv_expiry: hop_data.outgoing_cltv_value,
-                                               phantom_shared_secret,
-                                       }
-                               } else if let Some(payment_preimage) = keysend_preimage {
+                               if let Some(payment_preimage) = keysend_preimage {
                                        // We need to check that the sender knows the keysend preimage before processing this
                                        // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
                                        // could discover the final destination of X, by probing the adjacent nodes on the route
@@ -2369,12 +2473,26 @@ where
                                                        msg: "Payment preimage didn't match payment hash",
                                                });
                                        }
-
+                                       if !self.default_configuration.accept_mpp_keysend && payment_data.is_some() {
+                                               return Err(ReceiveError {
+                                                       err_code: 0x4000|22,
+                                                       err_data: Vec::new(),
+                                                       msg: "We don't support MPP keysend payments",
+                                               });
+                                       }
                                        PendingHTLCRouting::ReceiveKeysend {
+                                               payment_data,
                                                payment_preimage,
                                                payment_metadata,
                                                incoming_cltv_expiry: hop_data.outgoing_cltv_value,
                                        }
+                               } else if let Some(data) = payment_data {
+                                       PendingHTLCRouting::Receive {
+                                               payment_data: data,
+                                               payment_metadata,
+                                               incoming_cltv_expiry: hop_data.outgoing_cltv_value,
+                                               phantom_shared_secret,
+                                       }
                                } else {
                                        return Err(ReceiveError {
                                                err_code: 0x4000|0x2000|3,
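
The preimage check referenced above is the usual `payment_hash == SHA256(payment_preimage)` relation, and receivers opt in to multi-part keysend via the config flag consulted as `self.default_configuration.accept_mpp_keysend`. Roughly (sketch; assumes `default_configuration` is the node's `UserConfig`):

    use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

    let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
    if hashed_preimage != payment_hash {
        // fail with 0x4000|22: the sender does not actually know the preimage
    }

    // Opting in to MPP keysend on the receiving node:
    let mut config = UserConfig::default();
    config.accept_mpp_keysend = true;
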
@@ -2849,18 +2967,18 @@ where
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
        pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
                                |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
                                self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        }
 
-       /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+       /// Similar to [`ChannelManager::send_payment_with_route`], but will automatically find a route based on
        /// `route_params` and retry failed payment paths based on `retry_strategy`.
        pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments
                        .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
                                &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
@@ -2873,7 +2991,7 @@ where
        #[cfg(test)]
        pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
                        |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
                        self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
@@ -2908,7 +3026,7 @@ where
        /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
        /// [`Event::PaymentSent`]: events::Event::PaymentSent
        pub fn abandon_payment(&self, payment_id: PaymentId) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
        }
 
@@ -2924,12 +3042,10 @@ where
        /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
        /// [`send_payment`] for more information about the risks of duplicate preimage usage.
        ///
-       /// Note that `route` must have exactly one path.
-       ///
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment_with_route(
                        route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
                        &self.node_signer, best_block_height,
@@ -2946,7 +3062,7 @@ where
        /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
        pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
                        payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
                        || self.compute_inflight_htlcs(),  &self.entropy_source, &self.node_signer, best_block_height,
@@ -2960,7 +3076,7 @@ where
        /// us to easily discern them from real payments.
        pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
                        |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
                        self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
@@ -3071,7 +3187,7 @@ where
        /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
        /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
        pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                for inp in funding_transaction.input.iter() {
                        if inp.witness.is_empty() {
@@ -3120,7 +3236,7 @@ where
                })
        }
 
-       /// Atomically updates the [`ChannelConfig`] for the given channels.
+       /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
        ///
        /// Once the updates are applied, each eligible channel (advertised with a known short channel
        /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
@@ -3142,18 +3258,16 @@ where
        /// [`ChannelUpdate`]: msgs::ChannelUpdate
        /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
        /// [`APIMisuseError`]: APIError::APIMisuseError
-       pub fn update_channel_config(
-               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+       pub fn update_partial_channel_config(
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config_update: &ChannelConfigUpdate,
        ) -> Result<(), APIError> {
-               if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
+               if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
                        return Err(APIError::APIMisuseError {
                                err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
                        });
                }
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
-                       &self.total_consistency_lock, &self.persistence_notifier,
-               );
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
@@ -3168,7 +3282,9 @@ where
                }
                for channel_id in channel_ids {
                        let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
-                       if !channel.update_config(config) {
+                       let mut config = channel.config();
+                       config.apply(config_update);
+                       if !channel.update_config(&config) {
                                continue;
                        }
                        if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
@@ -3183,6 +3299,34 @@ where
                Ok(())
        }
 
+       /// Atomically updates the [`ChannelConfig`] for the given channels.
+       ///
+       /// Once the updates are applied, each eligible channel (advertised with a known short channel
+       /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
+       /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
+       /// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
+       ///
+       /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
+       /// `counterparty_node_id` is provided.
+       ///
+       /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
+       /// below [`MIN_CLTV_EXPIRY_DELTA`].
+       ///
+       /// If an error is returned, none of the updates should be considered applied.
+       ///
+       /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
+       /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
+       /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
+       /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
+       /// [`ChannelUpdate`]: msgs::ChannelUpdate
+       /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
+       /// [`APIMisuseError`]: APIError::APIMisuseError
+       pub fn update_channel_config(
+               &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+       ) -> Result<(), APIError> {
+               return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
+       }
+
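
A short usage sketch of the new partial-update entry point (assumes `ChannelConfigUpdate` provides a `Default` impl and per-field `Option`s; the node and channel ids are placeholders):

    let mut update = ChannelConfigUpdate::default();
    update.forwarding_fee_base_msat = Some(1_000);
    update.cltv_expiry_delta = Some(72);
    // Fields left as None are untouched on each channel, unlike
    // update_channel_config, which overwrites the entire ChannelConfig.
    channel_manager.update_partial_channel_config(&counterparty_node_id, &[channel_id], &update)?;
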
        /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
        /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
        ///
@@ -3206,7 +3350,7 @@ where
        // TODO: when we move to deciding the best outbound channel at forward time, only take
        // `next_node_id` and not `next_hop_channel_id`
        pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let next_hop_scid = {
                        let peer_state_lock = self.per_peer_state.read().unwrap();
@@ -3262,7 +3406,7 @@ where
        ///
        /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
        pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
                        .ok_or_else(|| APIError::APIMisuseError {
@@ -3291,7 +3435,7 @@ where
        /// Should only really ever be called in response to a PendingHTLCsForwardable event.
        /// Will likely generate further events.
        pub fn process_pending_htlc_forwards(&self) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut new_events = VecDeque::new();
                let mut failed_forwards = Vec::new();
@@ -3492,16 +3636,19 @@ where
                                                                                (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
                                                                                        Some(payment_data), phantom_shared_secret, onion_fields)
                                                                        },
-                                                                       PendingHTLCRouting::ReceiveKeysend { payment_preimage, payment_metadata, incoming_cltv_expiry } => {
-                                                                               let onion_fields = RecipientOnionFields { payment_secret: None, payment_metadata };
+                                                                       PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry } => {
+                                                                               let onion_fields = RecipientOnionFields {
+                                                                                       payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
+                                                                                       payment_metadata
+                                                                               };
                                                                                (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
-                                                                                       None, None, onion_fields)
+                                                                                       payment_data, None, onion_fields)
                                                                        },
                                                                        _ => {
                                                                                panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
                                                                        }
                                                                };
-                                                               let mut claimable_htlc = ClaimableHTLC {
+                                                               let claimable_htlc = ClaimableHTLC {
                                                                        prev_hop: HTLCPreviousHopData {
                                                                                short_channel_id: prev_short_channel_id,
                                                                                outpoint: prev_funding_outpoint,
@@ -3551,13 +3698,11 @@ where
                                                                }
 
                                                                macro_rules! check_total_value {
-                                                                       ($payment_data: expr, $payment_preimage: expr) => {{
+                                                                       ($purpose: expr) => {{
                                                                                let mut payment_claimable_generated = false;
-                                                                               let purpose = || {
-                                                                                       events::PaymentPurpose::InvoicePayment {
-                                                                                               payment_preimage: $payment_preimage,
-                                                                                               payment_secret: $payment_data.payment_secret,
-                                                                                       }
+                                                                               let is_keysend = match $purpose {
+                                                                                       events::PaymentPurpose::SpontaneousPayment(_) => true,
+                                                                                       events::PaymentPurpose::InvoicePayment { .. } => false,
                                                                                };
                                                                                let mut claimable_payments = self.claimable_payments.lock().unwrap();
                                                                                if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
@@ -3569,9 +3714,18 @@ where
                                                                                        .or_insert_with(|| {
                                                                                                committed_to_claimable = true;
                                                                                                ClaimablePayment {
-                                                                                                       purpose: purpose(), htlcs: Vec::new(), onion_fields: None,
+                                                                                                       purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
                                                                                                }
                                                                                        });
+                                                                               if $purpose != claimable_payment.purpose {
+                                                                                       let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
+                                                                                       log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), log_bytes!(payment_hash.0), log_keysend(!is_keysend));
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                               }
+                                                                               if !self.default_configuration.accept_mpp_keysend && is_keysend && !claimable_payment.htlcs.is_empty() {
+                                                                                       log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash and our config states we don't accept MPP keysend", log_bytes!(payment_hash.0));
+                                                                                       fail_htlc!(claimable_htlc, payment_hash);
+                                                                               }
                                                                                if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
                                                                                        if earlier_fields.check_merge(&mut onion_fields).is_err() {
                                                                                                fail_htlc!(claimable_htlc, payment_hash);
@@ -3580,38 +3734,27 @@ where
                                                                                        claimable_payment.onion_fields = Some(onion_fields);
                                                                                }
                                                                                let ref mut htlcs = &mut claimable_payment.htlcs;
-                                                                               if htlcs.len() == 1 {
-                                                                                       if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
-                                                                                               log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
-                                                                                               fail_htlc!(claimable_htlc, payment_hash);
-                                                                                       }
-                                                                               }
                                                                                let mut total_value = claimable_htlc.sender_intended_value;
                                                                                let mut earliest_expiry = claimable_htlc.cltv_expiry;
                                                                                for htlc in htlcs.iter() {
                                                                                        total_value += htlc.sender_intended_value;
                                                                                        earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
-                                                                                       match &htlc.onion_payload {
-                                                                                               OnionPayload::Invoice { .. } => {
-                                                                                                       if htlc.total_msat != $payment_data.total_msat {
-                                                                                                               log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-                                                                                                                       log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
-                                                                                                               total_value = msgs::MAX_VALUE_MSAT;
-                                                                                                       }
-                                                                                                       if total_value >= msgs::MAX_VALUE_MSAT { break; }
-                                                                                               },
-                                                                                               _ => unreachable!(),
+                                                                                       if htlc.total_msat != claimable_htlc.total_msat {
+                                                                                               log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+                                                                                                       log_bytes!(payment_hash.0), claimable_htlc.total_msat, htlc.total_msat);
+                                                                                               total_value = msgs::MAX_VALUE_MSAT;
                                                                                        }
+                                                                                       if total_value >= msgs::MAX_VALUE_MSAT { break; }
                                                                                }
                                                                                // The condition determining whether an MPP is complete must
                                                                                // match exactly the condition used in `timer_tick_occurred`
                                                                                if total_value >= msgs::MAX_VALUE_MSAT {
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
+                                                                               } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
                                                                                        log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
                                                                                                log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               } else if total_value >= $payment_data.total_msat {
+                                                                               } else if total_value >= claimable_htlc.total_msat {
                                                                                        #[allow(unused_assignments)] {
                                                                                                committed_to_claimable = true;
                                                                                        }
@@ -3622,7 +3765,7 @@ where
                                                                                        new_events.push_back((events::Event::PaymentClaimable {
                                                                                                receiver_node_id: Some(receiver_node_id),
                                                                                                payment_hash,
-                                                                                               purpose: purpose(),
+                                                                                               purpose: $purpose,
                                                                                                amount_msat,
                                                                                                via_channel_id: Some(prev_channel_id),
                                                                                                via_user_channel_id: Some(prev_user_channel_id),
@@ -3670,49 +3813,23 @@ where
                                                                                                                fail_htlc!(claimable_htlc, payment_hash);
                                                                                                        }
                                                                                                }
-                                                                                               check_total_value!(payment_data, payment_preimage);
+                                                                                               let purpose = events::PaymentPurpose::InvoicePayment {
+                                                                                                       payment_preimage: payment_preimage.clone(),
+                                                                                                       payment_secret: payment_data.payment_secret,
+                                                                                               };
+                                                                                               check_total_value!(purpose);
                                                                                        },
                                                                                        OnionPayload::Spontaneous(preimage) => {
-                                                                                               let mut claimable_payments = self.claimable_payments.lock().unwrap();
-                                                                                               if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
-                                                                                                       fail_htlc!(claimable_htlc, payment_hash);
-                                                                                               }
-                                                                                               match claimable_payments.claimable_payments.entry(payment_hash) {
-                                                                                                       hash_map::Entry::Vacant(e) => {
-                                                                                                               let amount_msat = claimable_htlc.value;
-                                                                                                               claimable_htlc.total_value_received = Some(amount_msat);
-                                                                                                               let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
-                                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
-                                                                                                               e.insert(ClaimablePayment {
-                                                                                                                       purpose: purpose.clone(),
-                                                                                                                       onion_fields: Some(onion_fields.clone()),
-                                                                                                                       htlcs: vec![claimable_htlc],
-                                                                                                               });
-                                                                                                               let prev_channel_id = prev_funding_outpoint.to_channel_id();
-                                                                                                               new_events.push_back((events::Event::PaymentClaimable {
-                                                                                                                       receiver_node_id: Some(receiver_node_id),
-                                                                                                                       payment_hash,
-                                                                                                                       amount_msat,
-                                                                                                                       purpose,
-                                                                                                                       via_channel_id: Some(prev_channel_id),
-                                                                                                                       via_user_channel_id: Some(prev_user_channel_id),
-                                                                                                                       claim_deadline,
-                                                                                                                       onion_fields: Some(onion_fields),
-                                                                                                               }, None));
-                                                                                                       },
-                                                                                                       hash_map::Entry::Occupied(_) => {
-                                                                                                               log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
-                                                                                                               fail_htlc!(claimable_htlc, payment_hash);
-                                                                                                       }
-                                                                                               }
+                                                                                               let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+                                                                                               check_total_value!(purpose);
                                                                                        }
                                                                                }
                                                                        },
                                                                        hash_map::Entry::Occupied(inbound_payment) => {
-                                                                               if payment_data.is_none() {
+                                                                               if let OnionPayload::Spontaneous(_) = claimable_htlc.onion_payload {
                                                                                        log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
-                                                                               };
+                                                                               }
                                                                                let payment_data = payment_data.unwrap();
                                                                                if inbound_payment.get().payment_secret != payment_data.payment_secret {
                                                                                        log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
@@ -3722,7 +3839,11 @@ where
                                                                                                log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
                                                                                        fail_htlc!(claimable_htlc, payment_hash);
                                                                                } else {
-                                                                                       let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
+                                                                                       let purpose = events::PaymentPurpose::InvoicePayment {
+                                                                                               payment_preimage: inbound_payment.get().payment_preimage,
+                                                                                               payment_secret: payment_data.payment_secret,
+                                                                                       };
+                                                                                       let payment_claimable_generated = check_total_value!(purpose);
                                                                                        if payment_claimable_generated {
                                                                                                inbound_payment.remove_entry();
                                                                                        }
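The refactor above collapses the invoice and keysend arms into a single `check_total_value!` call that differs only in the `PaymentPurpose` it is handed. A minimal standalone sketch of that insert-or-fail-on-duplicate pattern, using simplified hypothetical types rather than LDK's actual structs:

use std::collections::hash_map::Entry;
use std::collections::HashMap;

enum Purpose { Invoice { payment_secret: [u8; 32] }, Spontaneous([u8; 32]) }

struct Claimable { purpose: Purpose, amount_msat: u64 }

// Both payment kinds land here once a Purpose has been built; a duplicate
// payment hash fails the new HTLC instead of duplicating per-kind bookkeeping.
fn register_claimable(
    claimable: &mut HashMap<[u8; 32], Claimable>,
    payment_hash: [u8; 32], purpose: Purpose, amount_msat: u64,
) -> Result<(), &'static str> {
    match claimable.entry(payment_hash) {
        Entry::Vacant(e) => { e.insert(Claimable { purpose, amount_msat }); Ok(()) }
        Entry::Occupied(_) => Err("duplicate payment hash, fail the HTLC back"),
    }
}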
@@ -3762,35 +3883,63 @@ where
                events.append(&mut new_events);
        }
 
-       /// Free the background events, generally called from timer_tick_occurred.
-       ///
-       /// Exposed for testing to allow us to process events quickly without generating accidental
-       /// BroadcastChannelUpdate events in timer_tick_occurred.
+       /// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
        ///
        /// Expects the caller to have a total_consistency_lock read lock.
-       fn process_background_events(&self) -> bool {
+       fn process_background_events(&self) -> NotifyOption {
+               debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
+
+               #[cfg(debug_assertions)]
+               self.background_events_processed_since_startup.store(true, Ordering::Release);
+
                let mut background_events = Vec::new();
                mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
                if background_events.is_empty() {
-                       return false;
+                       return NotifyOption::SkipPersist;
                }
 
                for event in background_events.drain(..) {
                        match event {
-                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                               BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
                                        // The channel has already been closed, so no use bothering to care about the
                                        // monitor update completing.
                                        let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                },
+                               BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
+                                       let update_res = self.chain_monitor.update_channel(funding_txo, &update);
+
+                                       let res = {
+                                               let per_peer_state = self.per_peer_state.read().unwrap();
+                                               if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+                                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                                       let peer_state = &mut *peer_state_lock;
+                                                       match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+                                                               hash_map::Entry::Occupied(mut chan) => {
+                                                                       handle_new_monitor_update!(self, update_res, update.update_id, peer_state_lock, peer_state, per_peer_state, chan)
+                                                               },
+                                                               hash_map::Entry::Vacant(_) => Ok(()),
+                                                       }
+                                               } else { Ok(()) }
+                                       };
+                                       // TODO: If this channel has since closed, we're likely providing a payment
+                                       // preimage update, which we must ensure is durable! We currently don't,
+                                       // however, ensure that.
+                                       if res.is_err() {
+                                               log_error!(self.logger,
+                                                       "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
+                                       }
+                                       let _ = handle_error!(self, res, counterparty_node_id);
+                               },
                        }
                }
-               true
+               NotifyOption::DoPersist
        }
 
        #[cfg(any(test, feature = "_test_utils"))]
        /// Process background events, for functional testing
        pub fn test_process_background_events(&self) {
-               self.process_background_events();
+               let _lck = self.total_consistency_lock.read().unwrap();
+               let _ = self.process_background_events();
        }
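`process_background_events` now drains the pending queue under the consistency lock and reports, via `NotifyOption`, whether anything was actually done. The core drain-and-report shape, sketched with plain std types (all names here are hypothetical):

use std::mem;
use std::sync::Mutex;

enum NotifyOption { DoPersist, SkipPersist }

struct BackgroundQueue<E> { pending: Mutex<Vec<E>> }

impl<E> BackgroundQueue<E> {
    // Swap the queue out under the lock so handlers run without holding it,
    // then tell the caller whether anything happened that needs persisting.
    fn drain_and_handle(&self, mut handle: impl FnMut(E)) -> NotifyOption {
        let mut events = Vec::new();
        mem::swap(&mut *self.pending.lock().unwrap(), &mut events);
        if events.is_empty() {
            return NotifyOption::SkipPersist;
        }
        for event in events.drain(..) {
            handle(event);
        }
        NotifyOption::DoPersist
    }
}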
 
        fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
@@ -3820,7 +3969,7 @@ where
        /// it wants to detect). Thus, we have a variant exposed here for its benefit.
        pub fn maybe_update_chan_fees(&self) {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = NotifyOption::SkipPersist;
+                       let mut should_persist = self.process_background_events();
 
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
@@ -3856,8 +4005,7 @@ where
        /// [`ChannelConfig`]: crate::util::config::ChannelConfig
        pub fn timer_tick_occurred(&self) {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = NotifyOption::SkipPersist;
-                       if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
+                       let mut should_persist = self.process_background_events();
 
                        let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
 
@@ -4043,7 +4191,7 @@ where
        ///
        /// See [`FailureCode`] for valid failure codes.
        pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
                if let Some(payment) = removed_source {
@@ -4220,7 +4368,7 @@ where
        pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
                let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut sources = {
                        let mut claimable_payments = self.claimable_payments.lock().unwrap();
@@ -4275,18 +4423,6 @@ where
                                break;
                        }
                        expected_amt_msat = htlc.total_value_received;
-
-                       if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
-                               // We don't currently support MPP for spontaneous payments, so just check
-                               // that there's one payment here and move on.
-                               if sources.len() != 1 {
-                                       log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
-                                       debug_assert!(false);
-                                       valid_mpp = false;
-                                       break;
-                               }
-                       }
-
                        claimable_amt_msat += htlc.value;
                }
                mem::drop(per_peer_state);
@@ -4425,16 +4561,16 @@ where
                                                                Some(claimed_htlc_value - forwarded_htlc_value)
                                                        } else { None };
 
-                                                       let prev_channel_id = Some(prev_outpoint.to_channel_id());
-                                                       let next_channel_id = Some(next_channel_id);
-
-                                                       Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
-                                                               fee_earned_msat,
-                                                               claim_from_onchain_tx: from_onchain,
-                                                               prev_channel_id,
-                                                               next_channel_id,
-                                                               outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-                                                       }})
+                                                       Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+                                                               event: events::Event::PaymentForwarded {
+                                                                       fee_earned_msat,
+                                                                       claim_from_onchain_tx: from_onchain,
+                                                                       prev_channel_id: Some(prev_outpoint.to_channel_id()),
+                                                                       next_channel_id: Some(next_channel_id),
+                                                                       outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+                                                               },
+                                                               downstream_counterparty_and_funding_outpoint: None,
+                                                       })
                                                } else { None }
                                        });
                                if let Err((pk, err)) = res {
@@ -4461,8 +4597,13 @@ where
                                                }, None));
                                        }
                                },
-                               MonitorUpdateCompletionAction::EmitEvent { event } => {
+                               MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+                                       event, downstream_counterparty_and_funding_outpoint
+                               } => {
                                        self.pending_events.lock().unwrap().push_back((event, None));
+                                       if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+                                               self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+                                       }
                                },
                        }
                }
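`EmitEvent` became `EmitEventAndFreeOtherChannel`: a completion action can now both queue an event and, once that is done, unblock another channel's monitor updates. A hedged sketch of that "event plus optional follow-up" shape over hypothetical types:

// Hypothetical simplified shape of the completion action above.
enum Completion<E, K> {
    EmitEventAndFreeOther { event: E, unblock: Option<K> },
}

fn run_completion<E, K>(action: Completion<E, K>, mut emit: impl FnMut(E), mut free: impl FnMut(K)) {
    match action {
        Completion::EmitEventAndFreeOther { event, unblock } => {
            // Queue the event first, then release the other channel's blocked
            // monitor updates, mirroring the ordering in the code above.
            emit(event);
            if let Some(key) = unblock {
                free(key);
            }
        }
    }
}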
@@ -4621,7 +4762,7 @@ where
        }
 
        fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
                let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5309,6 +5450,24 @@ where
                }
        }
 
+       /// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
+       /// [`msgs::RevokeAndACK`] should be held for the given channel until some other event
+       /// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
+       /// the [`ChannelMonitorUpdate`] in question.
+       fn raa_monitor_updates_held(&self,
+               actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+               channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+       ) -> bool {
+               actions_blocking_raa_monitor_updates
+                       .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+               || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+                       action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+                               channel_funding_outpoint,
+                               counterparty_node_id,
+                       })
+               })
+       }
+
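`raa_monitor_updates_held` treats a channel as blocked if either its registered blocker list is non-empty or a still-pending event references it. A small sketch of that two-source check, with hypothetical simplified types:

use std::collections::BTreeMap;

type ChannelId = [u8; 32];
struct Blocker(u64);

// RAA monitor updates stay held while any explicit blocker is registered for
// the channel, or while some still-pending event names it.
fn raa_updates_held(
    blockers: &BTreeMap<ChannelId, Vec<Blocker>>,
    pending_event_channels: &[ChannelId],
    channel_id: ChannelId,
) -> bool {
    blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
        || pending_event_channels.iter().any(|c| *c == channel_id)
}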
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
                let (htlcs_to_fail, res) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5568,13 +5727,8 @@ where
        /// update events as a separate process method here.
        #[cfg(fuzzing)]
        pub fn process_monitor_events(&self) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       if self.process_pending_monitor_events() {
-                               NotifyOption::DoPersist
-                       } else {
-                               NotifyOption::SkipPersist
-                       }
-               });
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               self.process_pending_monitor_events();
        }
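`process_monitor_events` (and the message handlers further down) now build the persistence guard from `self`, so constructing the guard is also the point where deferred background events get flushed before the real work runs. A rough sketch of that RAII shape, using hypothetical stand-ins for the notifier and guard rather than LDK's `PersistenceNotifierGuard`:

use std::sync::{Condvar, Mutex};

struct Notifier { woken: Mutex<bool>, cv: Condvar }

impl Notifier {
    fn notify(&self) {
        *self.woken.lock().unwrap() = true;
        self.cv.notify_all();
    }
}

struct Guard<'a> { notifier: &'a Notifier }

impl<'a> Guard<'a> {
    // Constructing the guard flushes any deferred background work first, so
    // every entry point that takes a guard runs with the queue already drained.
    fn notify_on_drop(notifier: &'a Notifier, process_background: impl FnOnce()) -> Guard<'a> {
        process_background();
        Guard { notifier }
    }
}

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // Wake anyone waiting for "something changed, consider persisting".
        self.notifier.notify();
    }
}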
 
        /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
@@ -5706,12 +5860,15 @@ where
                        // Channel::force_shutdown tries to make us do) as we may still be in initialization,
                        // so we track the update internally and handle it when the user next calls
                        // timer_tick_occurred, guaranteeing we're running normally.
-                       if let Some((funding_txo, update)) = failure.0.take() {
+                       if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
                                assert_eq!(update.updates.len(), 1);
                                if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                                        assert!(should_broadcast);
                                } else { unreachable!(); }
-                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+                               self.pending_background_events.lock().unwrap().push(
+                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                               counterparty_node_id, funding_txo, update
+                                       });
                        }
                        self.finish_force_close_channel(failure);
                }
@@ -5726,7 +5883,7 @@ where
 
                let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
                match payment_secrets.entry(payment_hash) {
                        hash_map::Entry::Vacant(e) => {
@@ -5975,25 +6132,37 @@ where
                self.pending_outbound_payments.clear_pending_payments()
        }
 
-       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+       /// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
+       /// [`Event`] being handled) completes, this should be called to restore the channel to normal
+       /// operation. It will double-check that nothing *else* is also blocking the same channel from
+       /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
+       fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
                let mut errors = Vec::new();
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                                let mut peer_state_lck = peer_state_mtx.lock().unwrap();
                                let peer_state = &mut *peer_state_lck;
-                               if self.pending_events.lock().unwrap().iter()
-                                       .any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-                                               channel_funding_outpoint, counterparty_node_id
-                                       }))
-                               {
-                                       // Check that, while holding the peer lock, we don't have another event
-                                       // blocking any monitor updates for this channel. If we do, let those
-                                       // events be the ones that ultimately release the monitor update(s).
-                                       log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+
+                               if let Some(blocker) = completed_blocker.take() {
+                                       // Only do this on the first iteration of the loop.
+                                       if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+                                               .get_mut(&channel_funding_outpoint.to_channel_id())
+                                       {
+                                               blockers.retain(|iter| iter != &blocker);
+                                       }
+                               }
+
+                               if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+                                       channel_funding_outpoint, counterparty_node_id) {
+                                       // Check that, while holding the peer lock, we don't have anything else
+                                       // blocking monitor updates for this channel. If we do, release the monitor
+                                       // update(s) when those blockers complete.
+                                       log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
                                                log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
                                        break;
                                }
+
                                if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
                                        debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
                                        if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
@@ -6035,7 +6204,7 @@ where
                                EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
                                        channel_funding_outpoint, counterparty_node_id
                                } => {
-                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+                                       self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
                                }
                        }
                }
@@ -6080,7 +6249,7 @@ where
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut result = NotifyOption::SkipPersist;
+                       let mut result = self.process_background_events();
 
                        // TODO: This behavior should be documented. It's unintuitive that we query
                        // ChannelMonitors when clearing other events.
@@ -6161,7 +6330,8 @@ where
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                let new_height = height - 1;
                {
                        let mut best_block = self.best_block.write().unwrap();
@@ -6195,7 +6365,8 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
 
@@ -6214,8 +6385,8 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
                self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
@@ -6258,7 +6429,8 @@ where
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
+                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.get_funding_txo() {
                                if funding_txo.txid == *txid {
@@ -6502,7 +6674,7 @@ where
        L::Target: Logger,
 {
        fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
        }
 
@@ -6513,7 +6685,7 @@ where
        }
 
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
        }
 
@@ -6524,74 +6696,75 @@ where
        }
 
        fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
                PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       let force_persist = self.process_background_events();
                        if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
-                               persist
+                               if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
                        } else {
                                NotifyOption::SkipPersist
                        }
@@ -6599,12 +6772,12 @@ where
        }
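For `handle_channel_update`, the persist decision is the combination of the forced persist from replaying background events and whatever the handler itself returned; `DoPersist` from either side wins. A trivial sketch of that merge (hypothetical helper, not LDK API):

#[derive(Clone, Copy, PartialEq)]
enum NotifyOption { DoPersist, SkipPersist }

// If either the background-event replay or the handler itself demanded
// persistence, the combined answer is DoPersist.
fn combine(force_persist: NotifyOption, handler_result: NotifyOption) -> NotifyOption {
    if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { handler_result }
}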
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
        }
 
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
@@ -6686,7 +6859,7 @@ where
                        return Err(());
                }
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                // If we have too many peers connected which don't have funded channels, disconnect the
                // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
@@ -6707,6 +6880,7 @@ where
                                                latest_features: init_msg.features.clone(),
                                                pending_msg_events: Vec::new(),
                                                monitor_update_blocked_actions: BTreeMap::new(),
+                                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
                                                is_connected: true,
                                        }));
                                },
@@ -6769,7 +6943,7 @@ where
        }
 
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                if msg.channel_id == [0; 32] {
                        let channel_ids: Vec<[u8; 32]> = {
@@ -6816,6 +6990,10 @@ where
                provided_init_features(&self.default_configuration)
        }
 
+       fn get_genesis_hashes(&self) -> Option<Vec<ChainHash>> {
+               Some(vec![ChainHash::from(&self.genesis_hash[..])])
+       }
+
        fn handle_tx_add_input(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAddInput) {
                let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
                        "Dual-funded channels not supported".to_owned(),
@@ -6965,10 +7143,9 @@ impl Writeable for ChannelDetails {
                        (14, user_channel_id_low, required),
                        (16, self.balance_msat, required),
                        (18, self.outbound_capacity_msat, required),
-                       // Note that by the time we get past the required read above, outbound_capacity_msat will be
-                       // filled in, so we can safely unwrap it here.
-                       (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
+                       (19, self.next_outbound_htlc_limit_msat, required),
                        (20, self.inbound_capacity_msat, required),
+                       (21, self.next_outbound_htlc_minimum_msat, required),
                        (22, self.confirmations_required, option),
                        (24, self.force_close_spend_delay, option),
                        (26, self.is_outbound, required),
@@ -7005,6 +7182,7 @@ impl Readable for ChannelDetails {
                        // filled in, so we can safely unwrap it here.
                        (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
                        (20, inbound_capacity_msat, required),
+                       (21, next_outbound_htlc_minimum_msat, (default_value, 0)),
                        (22, confirmations_required, option),
                        (24, force_close_spend_delay, option),
                        (26, is_outbound, required),
@@ -7038,6 +7216,7 @@ impl Readable for ChannelDetails {
                        balance_msat: balance_msat.0.unwrap(),
                        outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
                        next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
+                       next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(),
                        inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
                        confirmations_required,
                        confirmations,
@@ -7074,6 +7253,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting,
                (0, payment_preimage, required),
                (2, incoming_cltv_expiry, required),
                (3, payment_metadata, option),
+               (4, payment_data, option), // Added in 0.0.116
        },
 ;);
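The new `(4, payment_data, option)` record uses an even TLV type number, which under the usual even/odd rule older readers must reject rather than skip; that is what the "will break deserialization on downgrades" note refers to. A generic sketch of the rule (hypothetical helper, not the macro internals):

// Simplified even/odd handling for an unknown TLV record of type `typ`:
// odd types are optional extensions an older reader may skip, even types
// mean "you must understand this", so an unknown even type is a hard error.
fn handle_unknown_tlv(typ: u64) -> Result<(), &'static str> {
    if typ % 2 == 1 {
        Ok(()) // unknown odd record: ignore it and keep reading
    } else {
        Err("unknown even TLV type: refuse to deserialize")
    }
}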
 
@@ -7784,8 +7964,10 @@ where
                                        log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                                                log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                                        let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-                                       if let Some(monitor_update) = monitor_update {
-                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+                                       if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+                                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                       counterparty_node_id, funding_txo, update
+                                               });
                                        }
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        channel_closures.push_back((events::Event::ChannelClosed {
@@ -7813,7 +7995,10 @@ where
                                                }
                                        }
                                } else {
-                                       log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
+                                       log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
+                                               log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(),
+                                               monitor.get_latest_update_id());
+                                       channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
                                        if let Some(short_channel_id) = channel.get_short_channel_id() {
                                                short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
                                        }
@@ -7860,7 +8045,7 @@ where
                                        update_id: CLOSED_CHANNEL_UPDATE_ID,
                                        updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                                };
-                               pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                               pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
                        }
                }
 
@@ -7898,6 +8083,7 @@ where
                                latest_features: Readable::read(reader)?,
                                pending_msg_events: Vec::new(),
                                monitor_update_blocked_actions: BTreeMap::new(),
+                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
                                is_connected: false,
                        };
                        per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -7927,6 +8113,24 @@ where
                        }
                }
 
+               for (node_id, peer_mtx) in per_peer_state.iter() {
+                       let peer_state = peer_mtx.lock().unwrap();
+                       for (_, chan) in peer_state.channel_by_id.iter() {
+                               for update in chan.uncompleted_unblocked_mon_updates() {
+                                       if let Some(funding_txo) = chan.get_funding_txo() {
+                                               log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
+                                                       update.update_id, log_bytes!(funding_txo.to_channel_id()));
+                                               pending_background_events.push(
+                                                       BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                                                               counterparty_node_id: *node_id, funding_txo, update: update.clone(),
+                                                       });
+                                       } else {
+                                               return Err(DecodeError::InvalidValue);
+                                       }
+                               }
+                       }
+               }
+
                let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
                let highest_seen_timestamp: u32 = Readable::read(reader)?;
 
@@ -7961,7 +8165,7 @@ where
                let mut claimable_htlc_purposes = None;
                let mut claimable_htlc_onion_fields = None;
                let mut pending_claiming_payments = Some(HashMap::new());
-               let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+               let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
                let mut events_override = None;
                read_tlv_fields!(reader, {
                        (1, pending_outbound_payments_no_retry, option),
@@ -8286,7 +8490,21 @@ where
                }
 
                for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
-                       if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+                       if let Some(peer_state) = per_peer_state.get(&node_id) {
+                               for (_, actions) in monitor_update_blocked_actions.iter() {
+                                       for action in actions.iter() {
+                                               if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+                                                       downstream_counterparty_and_funding_outpoint:
+                                                               Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+                                               } = action {
+                                                       if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+                                                               blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+                                                                       .entry(blocked_channel_outpoint.to_channel_id())
+                                                                       .or_insert_with(Vec::new).push(blocking_action.clone());
+                                                       }
+                                               }
+                                       }
+                               }
                                peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
                        } else {
                                log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
@@ -8328,6 +8546,8 @@ where
                        pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
+                       #[cfg(debug_assertions)]
+                       background_events_processed_since_startup: AtomicBool::new(false),
                        persistence_notifier: Notifier::new(),
 
                        entropy_source: args.entropy_source,
@@ -8367,7 +8587,7 @@ mod tests {
        use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
        use crate::util::errors::APIError;
        use crate::util::test_utils;
-       use crate::util::config::ChannelConfig;
+       use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
        use crate::sign::EntropySource;
 
        #[test]
@@ -8576,13 +8796,26 @@ mod tests {
 
        #[test]
        fn test_keysend_dup_payment_hash() {
+               do_test_keysend_dup_payment_hash(false);
+               do_test_keysend_dup_payment_hash(true);
+       }
+
+       fn do_test_keysend_dup_payment_hash(accept_mpp_keysend: bool) {
                // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
                //      outbound regular payment fails as expected.
                // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
                //      fails as expected.
+               // (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
+               //      payment fails as expected. When `accept_mpp_keysend` is false, this tests that we
+               //      reject MPP keysend payments, since in this case where the payment has no payment
+               //      secret, a keysend payment with a duplicate hash is basically an MPP keysend. If
+               //      `accept_mpp_keysend` is true, this tests that we only accept MPP keysends with
+               //      payment secrets and reject otherwise.
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let mut mpp_keysend_cfg = test_default_channel_config();
+               mpp_keysend_cfg.accept_mpp_keysend = accept_mpp_keysend;
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(mpp_keysend_cfg)]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
                create_announced_chan_between_nodes(&nodes, 0, 1);
                let scorer = test_utils::TestScorer::new();
@@ -8594,7 +8827,7 @@ mod tests {
 
                // Next, attempt a keysend payment and make sure it fails.
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
+                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
                        final_value_msat: 100_000,
                };
                let route = find_route(
@@ -8671,6 +8904,53 @@ mod tests {
 
                // Finally, succeed the keysend payment.
                claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+               // To start (3), send a keysend payment but don't claim it.
+               let payment_id_1 = PaymentId([44; 32]);
+               let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+                       RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let event = events.pop().unwrap();
+               let path = vec![&nodes[1]];
+               pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+               // Next, attempt a second keysend payment with the same payment hash and make sure it fails.
+               let route_params = RouteParameters {
+                       payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
+                       final_value_msat: 100_000,
+               };
+               let route = find_route(
+                       &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+               ).unwrap();
+               let payment_id_2 = PaymentId([45; 32]);
+               nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
+                       RecipientOnionFields::spontaneous_empty(), payment_id_2).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               let ev = events.drain(..).next().unwrap();
+               let payment_event = SendEvent::from_event(ev);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+               check_added_monitors!(nodes[1], 0);
+               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+               expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
+               check_added_monitors!(nodes[1], 1);
+               let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates.update_fail_htlcs.len(), 1);
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+               commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+               expect_payment_failed!(nodes[0], payment_hash, true);
+
+               // Finally, claim the original payment.
+               claim_payment(&nodes[0], &expected_route, payment_preimage);
        }
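The two parameterizations above boil down to a small receiver-side decision: a second keysend HTLC that reuses a pending payment hash and carries no payment secret is treated as an (unsupported) MPP keysend and failed back regardless of the config flag, while a keysend that does carry a payment secret is only accepted when `accept_mpp_keysend` is set. A minimal sketch of that policy, using hypothetical names and inputs rather than the actual `ChannelManager` internals:

```rust
// Illustrative sketch only -- not the ChannelManager code paths this diff touches.
#[derive(Debug, PartialEq)]
enum KeysendAction { AcceptSinglePart, AcceptMppPart, Fail }

/// `has_payment_secret`: whether the keysend onion carried `payment_data`.
/// `duplicate_hash_pending`: whether an unclaimed HTLC with this payment hash is already held.
/// `accept_mpp_keysend`: the receiver-side config flag exercised by the test above.
fn keysend_action(has_payment_secret: bool, duplicate_hash_pending: bool,
	accept_mpp_keysend: bool) -> KeysendAction {
	if has_payment_secret {
		// A payment secret on a keysend is only acceptable if we support MPP keysend.
		if accept_mpp_keysend { KeysendAction::AcceptMppPart } else { KeysendAction::Fail }
	} else if duplicate_hash_pending {
		// Without a secret, a duplicate-hash keysend is effectively an MPP part we
		// cannot safely correlate, so it is failed back in both configurations.
		KeysendAction::Fail
	} else {
		KeysendAction::AcceptSinglePart
	}
}

#[test]
fn keysend_policy_matches_test_expectations() {
	assert_eq!(keysend_action(false, true, false), KeysendAction::Fail);
	assert_eq!(keysend_action(false, true, true), KeysendAction::Fail);
	assert_eq!(keysend_action(true, false, false), KeysendAction::Fail);
	assert_eq!(keysend_action(true, false, true), KeysendAction::AcceptMppPart);
}
```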
 
        #[test]
@@ -8687,7 +8967,7 @@ mod tests {
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                        final_value_msat: 10_000,
                };
                let network_graph = nodes[0].network_graph.clone();
@@ -8720,10 +9000,13 @@ mod tests {
 
        #[test]
        fn test_keysend_msg_with_secret_err() {
-               // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+               // Test that we error as expected if we receive a keysend payment that includes a payment
+               // secret when we don't support MPP keysend.
+               let mut reject_mpp_keysend_cfg = test_default_channel_config();
+               reject_mpp_keysend_cfg.accept_mpp_keysend = false;
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(reject_mpp_keysend_cfg)]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
                let payer_pubkey = nodes[0].node.get_our_node_id();
@@ -8731,7 +9014,7 @@ mod tests {
 
                let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
                let route_params = RouteParameters {
-                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+                       payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                        final_value_msat: 10_000,
                };
                let network_graph = nodes[0].network_graph.clone();
@@ -9086,12 +9369,14 @@ mod tests {
                                &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                        peer_pks.push(random_pk);
                        nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+                       }, true).unwrap();
                }
                let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                        &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap_err();
 
                // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
                // them if we have too many un-channel'd peers.
@@ -9102,13 +9387,16 @@ mod tests {
                        if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
                }
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap_err();
 
                // but of course if the connection is outbound it's allowed...
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
                // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
@@ -9132,7 +9420,8 @@ mod tests {
                // "protected" and can connect again.
                mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
 
                // Further, because the first channel was funded, we can open another channel with
@@ -9197,7 +9486,8 @@ mod tests {
                        let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                                &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                        nodes[1].node.peer_connected(&random_pk, &msgs::Init {
-                               features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+                       }, true).unwrap();
 
                        nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
                        let events = nodes[1].node.get_and_clear_pending_events();
@@ -9215,7 +9505,8 @@ mod tests {
                let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
                        &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
                nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
-                       features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
+                       features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
                nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
                let events = nodes[1].node.get_and_clear_pending_events();
                match events[0] {
@@ -9278,6 +9569,62 @@ mod tests {
 
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
        }
+
+       #[test]
+       fn test_update_channel_config() {
+               let chanmon_cfg = create_chanmon_cfgs(2);
+               let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+               let mut user_config = test_default_channel_config();
+               let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
+               let nodes = create_network(2, &node_cfg, &node_chanmgr);
+               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let channel = &nodes[0].node.list_channels()[0];
+
+               nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
+
+               user_config.channel_config.forwarding_fee_base_msat += 10;
+               nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 0);
+
+               let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+                       cltv_expiry_delta: Some(new_cltv_expiry_delta),
+                       ..Default::default()
+               }).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+
+               let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
+               nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
+                       forwarding_fee_proportional_millionths: Some(new_fee),
+                       ..Default::default()
+               }).unwrap();
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
+               assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
+               let events = nodes[0].node.get_and_clear_pending_msg_events();
+               assert_eq!(events.len(), 1);
+               match &events[0] {
+                       MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+                       _ => panic!("expected BroadcastChannelUpdate event"),
+               }
+       }
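`test_update_channel_config` exercises the new partial-update path: `update_channel_config` with unchanged values emits no message, while `update_partial_channel_config` applies only the `Some(..)` fields of a `ChannelConfigUpdate` and broadcasts a `channel_update` only when something actually changed. A hedged caller-side sketch of the same pattern; `channel_manager`, `counterparty_node_id` and `channel_ids` are assumed to exist in the surrounding code:

```rust
use lightning::util::config::ChannelConfigUpdate;

// Build an update that raises only the proportional forwarding fee; every field
// left as `None` keeps its current per-channel value (as the test asserts for
// `cltv_expiry_delta` after the fee-only update). Assumes the field is publicly
// settable, as the struct literals in the test suggest.
fn proportional_fee_update(new_fee_ppm: u32) -> ChannelConfigUpdate {
	let mut update = ChannelConfigUpdate::default();
	update.forwarding_fee_proportional_millionths = Some(new_fee_ppm);
	update
}

// Usage, under the assumptions named above:
// channel_manager.update_partial_channel_config(
// 	&counterparty_node_id, &channel_ids, &proportional_fee_update(1_500))?;
```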
 }
 
 #[cfg(ldk_bench)]
@@ -9359,8 +9706,12 @@ pub mod bench {
                });
                let node_b_holder = ANodeHolder { node: &node_b };
 
-               node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
-               node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
+               node_a.peer_connected(&node_b.get_our_node_id(), &Init {
+                       features: node_b.init_features(), networks: None, remote_network_address: None
+               }, true).unwrap();
+               node_b.peer_connected(&node_a.get_our_node_id(), &Init {
+                       features: node_a.init_features(), networks: None, remote_network_address: None
+               }, false).unwrap();
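Each `peer_connected` call here and in the tests above now passes a `networks` field on `msgs::Init`, matching the `ChainHash` import added at the top of the file; `None` keeps the previous behavior of not advertising any chains. A hedged sketch of populating it for mainnet, assuming the field is an `Option<Vec<ChainHash>>` as the `None` defaults suggest:

```rust
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::network::constants::Network;
use lightning::ln::features::InitFeatures;
use lightning::ln::msgs::Init;

// Hypothetical helper: build an Init that advertises Bitcoin mainnet in the new
// `networks` field. Passing `networks: None`, as this diff does everywhere, simply
// omits the advertisement.
fn mainnet_init(features: InitFeatures) -> Init {
	Init {
		features,
		networks: Some(vec![ChainHash::using_genesis_block(Network::Bitcoin)]),
		remote_network_address: None,
	}
}
```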
                node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
                node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
                node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));