Correct ChannelUnavailable error docs on `send_payment_with_route`
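
For context, a minimal sketch of how a sender might react to the corrected error semantics documented in this patch. It assumes LDK's `APIError` variants as referenced in the updated docs; the handler name and retry policy are illustrative only and are not part of this change.

    use lightning::util::errors::APIError;

    // Hypothetical per-path error handler for a payment attempt.
    fn handle_path_error(err: &APIError) {
        match err {
            // Per the corrected docs: the next-hop channel has been closed,
            // doesn't exist, or its peer is disconnected -- find another route.
            APIError::ChannelUnavailable { err } => eprintln!("re-route needed: {}", err),
            // A new monitor update is still in flight; the commitment update
            // will be resent once it completes, so don't retry immediately.
            APIError::MonitorUpdateInProgress => eprintln!("waiting on monitor update"),
            // Invalid route or forwarding parameters (cltv_delta, fee, node pubkey).
            APIError::InvalidRoute { err } => eprintln!("fix the route: {}", err),
            other => eprintln!("unexpected error: {:?}", other),
        }
    }
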
[rust-lightning] / lightning / src / ln / channelmanager.rs
index 2748062c905c3539a7cc43fa59498b733662e5a3..23b45077af02bafb0ea53df1f55455260283cf66 100644
@@ -495,6 +495,10 @@ impl MsgHandleErrInternal {
                        channel_capacity: None,
                }
        }
+
+       fn closes_channel(&self) -> bool {
+               self.chan_id.is_some()
+       }
 }
 
 /// We hold back HTLCs we intend to relay for a random interval greater than this (see
@@ -835,33 +839,46 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> =
                &'g L
        >;
 
-macro_rules! define_test_pub_trait { ($vis: vis) => {
-/// A trivial trait which describes any [`ChannelManager`] used in testing.
-$vis trait AChannelManager {
+/// A trivial trait which describes any [`ChannelManager`].
+pub trait AChannelManager {
+       /// A type implementing [`chain::Watch`].
        type Watch: chain::Watch<Self::Signer> + ?Sized;
+       /// A type that may be dereferenced to [`Self::Watch`].
        type M: Deref<Target = Self::Watch>;
+       /// A type implementing [`BroadcasterInterface`].
        type Broadcaster: BroadcasterInterface + ?Sized;
+       /// A type that may be dereferenced to [`Self::Broadcaster`].
        type T: Deref<Target = Self::Broadcaster>;
+       /// A type implementing [`EntropySource`].
        type EntropySource: EntropySource + ?Sized;
+       /// A type that may be dereferenced to [`Self::EntropySource`].
        type ES: Deref<Target = Self::EntropySource>;
+       /// A type implementing [`NodeSigner`].
        type NodeSigner: NodeSigner + ?Sized;
+       /// A type that may be dereferenced to [`Self::NodeSigner`].
        type NS: Deref<Target = Self::NodeSigner>;
+       /// A type implementing [`WriteableEcdsaChannelSigner`].
        type Signer: WriteableEcdsaChannelSigner + Sized;
+       /// A type implementing [`SignerProvider`] for [`Self::Signer`].
        type SignerProvider: SignerProvider<Signer = Self::Signer> + ?Sized;
+       /// A type that may be dereferenced to [`Self::SignerProvider`].
        type SP: Deref<Target = Self::SignerProvider>;
+       /// A type implementing [`FeeEstimator`].
        type FeeEstimator: FeeEstimator + ?Sized;
+       /// A type that may be dereferenced to [`Self::FeeEstimator`].
        type F: Deref<Target = Self::FeeEstimator>;
+       /// A type implementing [`Router`].
        type Router: Router + ?Sized;
+       /// A type that may be dereferenced to [`Self::Router`].
        type R: Deref<Target = Self::Router>;
+       /// A type implementing [`Logger`].
        type Logger: Logger + ?Sized;
+       /// A type that may be dereferenced to [`Self::Logger`].
        type L: Deref<Target = Self::Logger>;
+       /// Returns a reference to the actual [`ChannelManager`] object.
        fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
 }
-} }
-#[cfg(any(test, feature = "_test_utils"))]
-define_test_pub_trait!(pub);
-#[cfg(not(any(test, feature = "_test_utils")))]
-define_test_pub_trait!(pub(crate));
+
 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
 for ChannelManager<M, T, ES, NS, SP, F, R, L>
 where
@@ -906,12 +923,14 @@ where
 /// called [`funding_transaction_generated`] for outbound channels) being closed.
 ///
 /// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
-/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
-/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates
-/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
-/// the serialization process). If the deserialized version is out-of-date compared to the
-/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
-/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
+/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
+/// [`ChannelMonitorUpdate`] before returning from
+/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
+/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
+/// `ChannelManager` operations from occurring during the serialization process). If the
+/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
+/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
+/// will be lost (modulo on-chain transaction fees).
 ///
 /// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
 /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
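
(Not part of the patch: a minimal sketch of the out-of-band `ChannelManager` write described in the doc comment above, assuming LDK's `Writeable::encode` and a local file path; durability and atomicity details are the persister's responsibility.)

    use lightning::util::ser::Writeable;
    use std::fs::File;
    use std::io::Write;

    // Hypothetical out-of-band persister: serializing holds the manager's
    // total_consistency_lock, blocking other ChannelManager operations.
    fn persist_manager<CM: Writeable>(channel_manager: &CM) -> std::io::Result<()> {
        let bytes = channel_manager.encode();
        let mut tmp = File::create("channel_manager.bin.tmp")?;
        tmp.write_all(&bytes)?;
        tmp.sync_all()?;
        // Atomic rename so a crash mid-write never leaves a truncated manager.
        std::fs::rename("channel_manager.bin.tmp", "channel_manager.bin")
    }
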
@@ -1185,7 +1204,8 @@ where
 
        background_events_processed_since_startup: AtomicBool,
 
-       persistence_notifier: Notifier,
+       event_persist_notifier: Notifier,
+       needs_persist_flag: AtomicBool,
 
        entropy_source: ES,
        node_signer: NS,
@@ -1214,7 +1234,8 @@ pub struct ChainParameters {
 #[must_use]
 enum NotifyOption {
        DoPersist,
-       SkipPersist,
+       SkipPersistHandleEvents,
+       SkipPersistNoEvents,
 }
 
 /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
@@ -1227,43 +1248,75 @@ enum NotifyOption {
 /// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
 /// notify or not based on whether relevant changes have been made, providing a closure to
 /// `optionally_notify` which returns a `NotifyOption`.
-struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
-       persistence_notifier: &'a Notifier,
+struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
+       event_persist_notifier: &'a Notifier,
+       needs_persist_flag: &'a AtomicBool,
        should_persist: F,
        // We hold onto this result so the lock doesn't get released immediately.
        _read_guard: RwLockReadGuard<'a, ()>,
 }
 
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
-       fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+       /// Notifies any waiters and indicates that we need to persist, in addition to possibly having
+       /// events to handle.
+       ///
+       /// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
+       /// other cases where losing the changes on restart may result in a force-close or otherwise
+       /// isn't ideal.
+       fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
+               Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
+       }
+
+       fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
+       -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
                let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
-               let _ = cm.get_cm().process_background_events(); // We always persist
+               let force_notify = cm.get_cm().process_background_events();
 
                PersistenceNotifierGuard {
-                       persistence_notifier: &cm.get_cm().persistence_notifier,
-                       should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+                       event_persist_notifier: &cm.get_cm().event_persist_notifier,
+                       needs_persist_flag: &cm.get_cm().needs_persist_flag,
+                       should_persist: move || {
+                               // Pick the "most" action between `persist_check` and the background events
+                               // processing and return that.
+                               let notify = persist_check();
+                               match (notify, force_notify) {
+                                       (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
+                                       (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
+                                       (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
+                                       (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
+                                       _ => NotifyOption::SkipPersistNoEvents,
+                               }
+                       },
                        _read_guard: read_guard,
                }
-
        }
 
        /// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
-       /// [`ChannelManager::process_background_events`] MUST be called first.
-       fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
-               let read_guard = lock.read().unwrap();
+       /// [`ChannelManager::process_background_events`] MUST be called first (or
+       /// [`Self::optionally_notify`] used).
+       fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
+       (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+               let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
 
                PersistenceNotifierGuard {
-                       persistence_notifier: notifier,
+                       event_persist_notifier: &cm.get_cm().event_persist_notifier,
+                       needs_persist_flag: &cm.get_cm().needs_persist_flag,
                        should_persist: persist_check,
                        _read_guard: read_guard,
                }
        }
 }
 
-impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
+impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
        fn drop(&mut self) {
-               if (self.should_persist)() == NotifyOption::DoPersist {
-                       self.persistence_notifier.notify();
+               match (self.should_persist)() {
+                       NotifyOption::DoPersist => {
+                               self.needs_persist_flag.store(true, Ordering::Release);
+                               self.event_persist_notifier.notify()
+                       },
+                       NotifyOption::SkipPersistHandleEvents =>
+                               self.event_persist_notifier.notify(),
+                       NotifyOption::SkipPersistNoEvents => {},
                }
        }
 }
@@ -1989,56 +2042,30 @@ macro_rules! handle_monitor_update_completion {
 }
 
 macro_rules! handle_new_monitor_update {
-       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
-               // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
-               // any case so that it won't deadlock.
-               debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
+       ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
                match $update_res {
+                       ChannelMonitorUpdateStatus::UnrecoverableError => {
+                               let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
+                               log_error!($self.logger, "{}", err_str);
+                               panic!("{}", err_str);
+                       },
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        &$chan.context.channel_id());
-                               Ok(false)
-                       },
-                       ChannelMonitorUpdateStatus::PermanentFailure => {
-                               log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
-                                       &$chan.context.channel_id());
-                               update_maps_on_chan_removal!($self, &$chan.context);
-                               let res = Err(MsgHandleErrInternal::from_finish_shutdown(
-                                       "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
-                                       $chan.context.get_user_id(), $chan.context.force_shutdown(false),
-                                       $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
-                               $remove;
-                               res
+                               false
                        },
                        ChannelMonitorUpdateStatus::Completed => {
                                $completed;
-                               Ok(true)
+                               true
                        },
                }
        } };
-       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
-               handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
-                       $per_peer_state_lock, $chan, _internal, $remove,
+       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
+               handle_new_monitor_update!($self, $update_res, $chan, _internal,
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
-       ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
-               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
-                       handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
-                               $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
-               } else {
-                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-                       // update).
-                       debug_assert!(false);
-                       let channel_id = *$chan_entry.key();
-                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-                               $chan_entry.get_mut(), &channel_id);
-                       $chan_entry.remove();
-                       Err(err)
-               }
-       };
-       ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+       ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
                        .or_insert_with(Vec::new);
                // During startup, we push monitor updates as background events through to here in
@@ -2050,8 +2077,7 @@ macro_rules! handle_new_monitor_update {
                                in_flight_updates.len() - 1
                        });
                let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
-               handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
-                       $per_peer_state_lock, $chan, _internal, $remove,
+               handle_new_monitor_update!($self, update_res, $chan, _internal,
                        {
                                let _ = in_flight_updates.remove(idx);
                                if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
@@ -2059,22 +2085,6 @@ macro_rules! handle_new_monitor_update {
                                }
                        })
        } };
-       ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
-               if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
-                       handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
-                               $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
-               } else {
-                       // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-                       // update).
-                       debug_assert!(false);
-                       let channel_id = *$chan_entry.key();
-                       let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-                               "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-                               $chan_entry.get_mut(), &channel_id);
-                       $chan_entry.remove();
-                       Err(err)
-               }
-       }
 }
 
 macro_rules! process_events_body {
@@ -2085,7 +2095,7 @@ macro_rules! process_events_body {
                                return;
                        }
 
-                       let mut result = NotifyOption::SkipPersist;
+                       let mut result;
 
                        {
                                // We'll acquire our total consistency lock so that we can be sure no other
@@ -2094,7 +2104,7 @@ macro_rules! process_events_body {
 
                                // Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
                                // ensure any startup-generated background events are handled first.
-                               if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+                               result = $self.process_background_events();
 
                                // TODO: This behavior should be documented. It's unintuitive that we query
                                // ChannelMonitors when clearing other events.
@@ -2134,8 +2144,14 @@ macro_rules! process_events_body {
                                processed_all_events = false;
                        }
 
-                       if result == NotifyOption::DoPersist {
-                               $self.persistence_notifier.notify();
+                       match result {
+                               NotifyOption::DoPersist => {
+                                       $self.needs_persist_flag.store(true, Ordering::Release);
+                                       $self.event_persist_notifier.notify();
+                               },
+                               NotifyOption::SkipPersistHandleEvents =>
+                                       $self.event_persist_notifier.notify(),
+                               NotifyOption::SkipPersistNoEvents => {},
                        }
                }
        }
@@ -2214,7 +2230,9 @@ where
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        background_events_processed_since_startup: AtomicBool::new(false),
-                       persistence_notifier: Notifier::new(),
+
+                       event_persist_notifier: Notifier::new(),
+                       needs_persist_flag: AtomicBool::new(false),
 
                        entropy_source,
                        node_signer,
@@ -2479,61 +2497,64 @@ where
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let result: Result<(), _> = loop {
-                       {
-                               let per_peer_state = self.per_peer_state.read().unwrap();
+               loop {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
 
-                               let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-                                       .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+                       let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+                               .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state = &mut *peer_state_lock;
+                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                       let peer_state = &mut *peer_state_lock;
 
-                               match peer_state.channel_by_id.entry(channel_id.clone()) {
-                                       hash_map::Entry::Occupied(mut chan_phase_entry) => {
-                                               if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-                                                       let funding_txo_opt = chan.context.get_funding_txo();
-                                                       let their_features = &peer_state.latest_features;
-                                                       let (shutdown_msg, mut monitor_update_opt, htlcs) =
-                                                               chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
-                                                       failed_htlcs = htlcs;
+                       match peer_state.channel_by_id.entry(channel_id.clone()) {
+                               hash_map::Entry::Occupied(mut chan_phase_entry) => {
+                                       if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+                                               let funding_txo_opt = chan.context.get_funding_txo();
+                                               let their_features = &peer_state.latest_features;
+                                               let (shutdown_msg, mut monitor_update_opt, htlcs) =
+                                                       chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+                                               failed_htlcs = htlcs;
+
+                                               // We can send the `shutdown` message before updating the `ChannelMonitor`
+                                               // here as we don't need the monitor update to complete until we send a
+                                               // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+                                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                                       node_id: *counterparty_node_id,
+                                                       msg: shutdown_msg,
+                                               });
 
-                                                       // We can send the `shutdown` message before updating the `ChannelMonitor`
-                                                       // here as we don't need the monitor update to complete until we send a
-                                                       // `shutdown_signed`, which we'll delay if we're pending a monitor update.
-                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
-                                                               node_id: *counterparty_node_id,
-                                                               msg: shutdown_msg,
-                                                       });
+                                               debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
+                                                       "We can't both complete shutdown and generate a monitor update");
 
-                                                       // Update the monitor with the shutdown script if necessary.
-                                                       if let Some(monitor_update) = monitor_update_opt.take() {
-                                                               break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                                       peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
-                                                       }
+                                               // Update the monitor with the shutdown script if necessary.
+                                               if let Some(monitor_update) = monitor_update_opt.take() {
+                                                       handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan);
+                                                       break;
+                                               }
 
-                                                       if chan.is_shutdown() {
-                                                               if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
-                                                                       if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
-                                                                               peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                                       msg: channel_update
-                                                                               });
-                                                                       }
-                                                                       self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+                                               if chan.is_shutdown() {
+                                                       if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
+                                                               if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
+                                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                               msg: channel_update
+                                                                       });
                                                                }
+                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                        }
-                                                       break Ok(());
                                                }
-                                       },
-                                       hash_map::Entry::Vacant(_) => (),
-                               }
+                                               break;
+                                       }
+                               },
+                               hash_map::Entry::Vacant(_) => {
+                                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
+                                       // it does not exist for this peer. Either way, we can attempt to force-close it.
+                                       //
+                                       // An appropriate error will be returned for non-existence of the channel if that's the case.
+                                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
+                               },
                        }
-                       // If we reach this point, it means that the channel_id either refers to an unfunded channel or
-                       // it does not exist for this peer. Either way, we can attempt to force-close it.
-                       //
-                       // An appropriate error will be returned for non-existence of the channel if that's the case.
-                       return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
-               };
+               }
 
                for htlc_source in failed_htlcs.drain(..) {
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
@@ -2541,7 +2562,6 @@ where
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
 
-               let _ = handle_error!(self, result, *counterparty_node_id);
                Ok(())
        }
 
@@ -2606,8 +2626,13 @@ where
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
 
-       #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
+               debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+               #[cfg(debug_assertions)]
+               for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+                       debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
+               }
+
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
@@ -2633,8 +2658,7 @@ where
                let peer_state_mutex = per_peer_state.get(peer_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
                let (update_opt, counterparty_node_id) = {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
+                       let mut peer_state = peer_state_mutex.lock().unwrap();
                        let closure_reason = if let Some(peer_msg) = peer_msg {
                                ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
                        } else {
@@ -2644,6 +2668,8 @@ where
                                log_error!(self.logger, "Force-closing channel {}", channel_id);
                                self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+                               mem::drop(peer_state);
+                               mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
                                                self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
@@ -2666,10 +2692,17 @@ where
                        }
                };
                if let Some(update) = update_opt {
-                       let mut peer_state = peer_state_mutex.lock().unwrap();
-                       peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                               msg: update
-                       });
+                       // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
+                       // not try to broadcast it via whatever peer we have.
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       let a_peer_state_opt = per_peer_state.get(peer_node_id)
+                               .ok_or(per_peer_state.values().next());
+                       if let Ok(a_peer_state_mutex) = a_peer_state_opt {
+                               let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
+                               a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                       msg: update
+                               });
+                       }
                }
 
                Ok(counterparty_node_id)
@@ -2749,7 +2782,7 @@ where
                let (short_channel_id, amt_to_forward, outgoing_cltv_value) = match hop_data {
                        msgs::InboundOnionPayload::Forward { short_channel_id, amt_to_forward, outgoing_cltv_value } =>
                                (short_channel_id, amt_to_forward, outgoing_cltv_value),
-                       msgs::InboundOnionPayload::Receive { .. } =>
+                       msgs::InboundOnionPayload::Receive { .. } | msgs::InboundOnionPayload::BlindedReceive { .. } =>
                                return Err(InboundOnionErr {
                                        msg: "Final Node OnionHopData provided for us as an intermediary node",
                                        err_code: 0x4000 | 22,
@@ -2781,12 +2814,19 @@ where
                                payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata, ..
                        } =>
                                (payment_data, keysend_preimage, custom_tlvs, amt_msat, outgoing_cltv_value, payment_metadata),
-                       _ =>
+                       msgs::InboundOnionPayload::BlindedReceive {
+                               amt_msat, total_msat, outgoing_cltv_value, payment_secret, ..
+                       } => {
+                               let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
+                               (Some(payment_data), None, Vec::new(), amt_msat, outgoing_cltv_value, None)
+                       }
+                       msgs::InboundOnionPayload::Forward { .. } => {
                                return Err(InboundOnionErr {
                                        err_code: 0x4000|22,
                                        err_data: Vec::new(),
                                        msg: "Got non final data with an HMAC of 0",
-                               }),
+                               })
+                       },
                };
                // final_incorrect_cltv_expiry
                if outgoing_cltv_value > cltv_expiry {
@@ -2926,7 +2966,10 @@ where
                        }
                }
 
-               let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+               let next_hop = match onion_utils::decode_next_payment_hop(
+                       shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac,
+                       msg.payment_hash, &self.node_signer
+               ) {
                        Ok(res) => res,
                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                return_malformed_err!(err_msg, err_code);
@@ -2948,7 +2991,9 @@ where
                        // We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
                        // inbound channel's state.
                        onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret, None)),
-                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } => {
+                       onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::Receive { .. }, .. } |
+                               onion_utils::Hop::Forward { next_hop_data: msgs::InboundOnionPayload::BlindedReceive { .. }, .. } =>
+                       {
                                return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
                        }
                };
@@ -3271,9 +3316,8 @@ where
                                                        }, onion_packet, None, &self.fee_estimator, &self.logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
-                                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
-                                                                       Err(e) => break Err(e),
-                                                                       Ok(false) => {
+                                                               match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+                                                                       false => {
                                                                                // Note that MonitorUpdateInProgress here indicates (per function
                                                                                // docs) that we will resend the commitment update once monitor
                                                                                // updating completes. Therefore, we must return an error
@@ -3282,7 +3326,7 @@ where
                                                                                // MonitorUpdateInProgress, below.
                                                                                return Err(APIError::MonitorUpdateInProgress);
                                                                        },
-                                                                       Ok(true) => {},
+                                                                       true => {},
                                                                }
                                                        },
                                                        None => {},
@@ -3344,9 +3388,8 @@ where
        /// In general, a path may raise:
        ///  * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
        ///    node public key) is specified.
-       ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
-       ///    (including due to previous monitor update failure or new permanent monitor update
-       ///    failure).
+       ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been
+       ///    closed, doesn't exist, or the peer is currently disconnected.
        ///  * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
        ///    relevant updates.
        ///
@@ -4045,7 +4088,10 @@ where
                                                                                        let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
                                                                                        if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
                                                                                                let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
-                                                                                               let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+                                                                                               let next_hop = match onion_utils::decode_next_payment_hop(
+                                                                                                       phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
+                                                                                                       payment_hash, &self.node_signer
+                                                                                               ) {
                                                                                                        Ok(res) => res,
                                                                                                        Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
                                                                                                                let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
@@ -4449,7 +4495,7 @@ where
                let mut background_events = Vec::new();
                mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
                if background_events.is_empty() {
-                       return NotifyOption::SkipPersist;
+                       return NotifyOption::SkipPersistNoEvents;
                }
 
                for event in background_events.drain(..) {
@@ -4461,33 +4507,29 @@ where
                                },
                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
                                        let mut updated_chan = false;
-                                       let res = {
+                                       {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
-                                                                       updated_chan = true;
-                                                                       handle_new_monitor_update!(self, funding_txo, update.clone(),
-                                                                               peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
+                                                                       if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
+                                                                               updated_chan = true;
+                                                                               handle_new_monitor_update!(self, funding_txo, update.clone(),
+                                                                                       peer_state_lock, peer_state, per_peer_state, chan);
+                                                                       } else {
+                                                                               debug_assert!(false, "We shouldn't have an update for a non-funded channel");
+                                                                       }
                                                                },
-                                                               hash_map::Entry::Vacant(_) => Ok(()),
+                                                               hash_map::Entry::Vacant(_) => {},
                                                        }
-                                               } else { Ok(()) }
-                                       };
+                                               }
+                                       }
                                        if !updated_chan {
                                                // TODO: Track this as in-flight even though the channel is closed.
                                                let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                        }
-                                       // TODO: If this channel has since closed, we're likely providing a payment
-                                       // preimage update, which we must ensure is durable! We currently don't,
-                                       // however, ensure that.
-                                       if res.is_err() {
-                                               log_error!(self.logger,
-                                                       "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
-                                       }
-                                       let _ = handle_error!(self, res, counterparty_node_id);
                                },
                                BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
                                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -4518,17 +4560,17 @@ where
        }
 
        fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
-               if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
+               if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
                // If the feerate has decreased by less than half, don't bother
                if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
-                               &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-                       return NotifyOption::SkipPersist;
+                               chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       return NotifyOption::SkipPersistNoEvents;
                }
                if !chan.context.is_live() {
                        log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
-                               &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
-                       return NotifyOption::SkipPersist;
+                               chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+                       return NotifyOption::SkipPersistNoEvents;
                }
                log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
                        &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
@@ -4543,8 +4585,8 @@ where
        /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
        /// it wants to detect). Thus, we have a variant exposed here for its benefit.
        pub fn maybe_update_chan_fees(&self) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
                        let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                        let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
@@ -4588,8 +4630,8 @@ where
        /// [`ChannelUpdate`]: msgs::ChannelUpdate
        /// [`ChannelConfig`]: crate::util::config::ChannelConfig
        pub fn timer_tick_occurred(&self) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut should_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut should_persist = NotifyOption::SkipPersistNoEvents;
 
                        let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
                        let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
@@ -4597,8 +4639,9 @@ where
                        let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        let mut pending_peers_awaiting_removal = Vec::new();
+                       let mut shutdown_channels = Vec::new();
 
-                       let process_unfunded_channel_tick = |
+                       let mut process_unfunded_channel_tick = |
                                chan_id: &ChannelId,
                                context: &mut ChannelContext<SP>,
                                unfunded_context: &mut UnfundedChannelContext,
@@ -4611,7 +4654,7 @@ where
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
                                        self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
-                                       self.finish_force_close_channel(context.force_shutdown(false));
+                                       shutdown_channels.push(context.force_shutdown(false));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
@@ -4804,6 +4847,10 @@ where
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
 
+                       for shutdown_res in shutdown_channels {
+                               self.finish_force_close_channel(shutdown_res);
+                       }
+
                        self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
 
                        // Technically we don't need to do this here, but if we have holding cell entries in a
@@ -4960,6 +5007,7 @@ where
                // This ensures that future code doesn't introduce a lock-order requirement for
                // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
                // this function with any `per_peer_state` peer lock acquired would.
+               #[cfg(debug_assertions)]
                for (_, peer) in self.per_peer_state.read().unwrap().iter() {
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
@@ -5210,15 +5258,8 @@ where
                                                                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                        }
                                                        if !during_init {
-                                                               let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-                                                                       peer_state, per_peer_state, chan_phase_entry);
-                                                               if let Err(e) = res {
-                                                                       // TODO: This is a *critical* error - we probably updated the outbound edge
-                                                                       // of the HTLC's monitor with a preimage. We should retry this monitor
-                                                                       // update over and over again until morale improves.
-                                                                       log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-                                                                       return Err((counterparty_node_id, e));
-                                                               }
+                                                               handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+                                                                       peer_state, per_peer_state, chan);
                                                        } else {
                                                                // If we're running during init we cannot update a monitor directly -
                                                                // they probably haven't actually been loaded yet. Instead, push the
@@ -5667,6 +5708,8 @@ where
        }
 
        fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+               // likely to be lost on restart!
                if msg.chain_hash != self.genesis_hash {
                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
@@ -5766,6 +5809,8 @@ where
        }
 
        fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+               // likely to be lost on restart!
                let (value, output_script, user_id) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -5841,47 +5886,42 @@ where
                                Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
-                               match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
+                               let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
+                               match id_to_peer_lock.entry(chan.context.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
                                                        funding_msg.channel_id))
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
-                                               i_e.insert(chan.context.get_counterparty_node_id());
-                                       }
-                               }
-
-                               // There's no problem signing a counterparty's funding transaction if our monitor
-                               // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
-                               // accepted payment from yet. We do, however, need to wait to send our channel_ready
-                               // until we have persisted our monitor.
-                               let new_channel_id = funding_msg.channel_id;
-                               peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
-                                       node_id: counterparty_node_id.clone(),
-                                       msg: funding_msg,
-                               });
+                                               let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+                                               if let Ok(persist_state) = monitor_res {
+                                                       i_e.insert(chan.context.get_counterparty_node_id());
+                                                       mem::drop(id_to_peer_lock);
+
+                                                       // There's no problem signing a counterparty's funding transaction if our monitor
+                                                       // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                                                       // accepted payment from yet. We do, however, need to wait to send our channel_ready
+                                                       // until we have persisted our monitor.
+                                                       peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+                                                               node_id: counterparty_node_id.clone(),
+                                                               msg: funding_msg,
+                                                       });
 
-                               let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
-
-                               if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
-                                       let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
-                                               per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
-                                               { peer_state.channel_by_id.remove(&new_channel_id) });
-
-                                       // Note that we reply with the new channel_id in error messages if we gave up on the
-                                       // channel, not the temporary_channel_id. This is compatible with ourselves, but the
-                                       // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-                                       // any messages referencing a previously-closed channel anyway.
-                                       // We do not propagate the monitor update to the user as it would be for a monitor
-                                       // that we didn't manage to store (and that we don't care about - we don't respond
-                                       // with the funding_signed so the channel can never go on chain).
-                                       if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-                                               res.0 = None;
+                                                       if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
+                                                               handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
+                                                                       per_peer_state, chan, INITIAL_MONITOR);
+                                                       } else {
+                                                               unreachable!("This must be a funded channel as we just inserted it.");
+                                                       }
+                                                       Ok(())
+                                               } else {
+                                                       log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
+                                                       return Err(MsgHandleErrInternal::send_err_msg_no_close(
+                                                               "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
+                                                               funding_msg.channel_id));
+                                               }
                                        }
-                                       res.map(|_| ())
-                               } else {
-                                       unreachable!("This must be a funded channel as we just inserted it.");
                                }
                        }
                }
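
On the fundee side, the rewritten `internal_funding_created` above asks the `ChainMonitor` to watch the channel before queueing `funding_signed`, and an `Err(())` from `watch_channel` (a duplicated funding outpoint) now rejects the channel outright instead of going through the old `shutdown_finish` fixup. Eliding the locking and map bookkeeping, the shape of the decision is roughly:

```rust
use lightning::chain::{self, ChannelMonitorUpdateStatus};
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::WriteableEcdsaChannelSigner;

// Illustrative shape only; the real code also updates id_to_peer and the
// per-peer channel map as shown in the hunk above.
fn accept_funding<S: WriteableEcdsaChannelSigner, W: chain::Watch<S>>(
	chain_monitor: &W, monitor: ChannelMonitor<S>,
) -> Result<ChannelMonitorUpdateStatus, ()> {
	let funding_txo = monitor.get_funding_txo().0;
	match chain_monitor.watch_channel(funding_txo, monitor) {
		Ok(persist_state) => {
			// Safe to queue funding_signed now; channel_ready waits until the
			// monitor persist tracked by `persist_state` completes.
			Ok(persist_state)
		},
		Err(()) => {
			// Duplicate funding outpoint: never send funding_signed, reply
			// with an error using the final (not temporary) channel_id.
			Err(())
		},
	}
}
```

The outbound (`funding_signed`) side in the next hunk makes the same check, closing the channel through `ChannelError::Close` when `watch_channel` fails.
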
@@ -5904,17 +5944,12 @@ where
                                        ChannelPhase::Funded(ref mut chan) => {
                                                let monitor = try_chan_phase_entry!(self,
                                                        chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
-                                               let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
-                                               let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
-                                               if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-                                                       // We weren't able to watch the channel to begin with, so no updates should be made on
-                                                       // it. Previously, full_stack_target found an (unreachable) panic when the
-                                                       // monitor update contained within `shutdown_finish` was applied.
-                                                       if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-                                                               shutdown_finish.0.take();
-                                                       }
+                                               if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
+                                                       handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
+                                                       Ok(())
+                                               } else {
+                                                       try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
                                                }
-                                               res.map(|_| ())
                                        },
                                        _ => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
@@ -5926,6 +5961,8 @@ where
        }
 
        fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -5978,8 +6015,9 @@ where
        }
 
        fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
-               let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
-               let result: Result<(), _> = loop {
+               let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
+               let mut finish_shutdown = None;
+               {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -6014,31 +6052,32 @@ where
                                                }
                                                // Update the monitor with the shutdown script if necessary.
                                                if let Some(monitor_update) = monitor_update_opt {
-                                                       break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+                                                       handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan);
                                                }
-                                               break Ok(());
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
                                                log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                                self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
-                                               self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
-                                               return Ok(());
+                                               finish_shutdown = Some(chan.context_mut().force_shutdown(false));
                                        },
                                }
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
-               };
+               }
                for htlc_source in dropped_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
+               if let Some(shutdown_res) = finish_shutdown {
+                       self.finish_force_close_channel(shutdown_res);
+               }
 
-               result
+               Ok(())
        }
 
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
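
The `internal_shutdown` rewrite above also shows a pattern repeated throughout this patch: side effects that must not run while per-peer locks are held (failing HTLCs backwards, finishing a force-close) are collected into locals and executed once the lock scope ends, rather than threaded out through a `loop`/`break` result. A generic, self-contained sketch of that pattern with hypothetical names:

```rust
use std::sync::Mutex;

// Hypothetical stand-ins for the per-peer state and the deferred work.
struct PeerState { pending_closures: Vec<u64> }

fn drain_and_close(per_peer: &Mutex<PeerState>) {
	let mut deferred: Vec<u64> = Vec::new();
	{
		// Do the minimum under the lock: decide what needs doing and record it.
		let mut state = per_peer.lock().unwrap();
		deferred.append(&mut state.pending_closures);
	} // Lock released here.

	// Act on the recorded work without holding the lock, so the follow-up
	// calls are free to take whatever locks they need.
	for id in deferred {
		println!("closing {id}");
	}
}
```
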
@@ -6104,6 +6143,9 @@ where
                //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
+
                let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -6196,6 +6238,8 @@ where
        }
 
        fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6219,6 +6263,8 @@ where
        }
 
        fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+               // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+               // closing a channel), so any changes are likely to be lost on restart!
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                        .ok_or_else(|| {
@@ -6261,8 +6307,9 @@ where
                                        let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-                                                       peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-                                       } else { Ok(()) }
+                                                       peer_state, per_peer_state, chan);
+                                       }
+                                       Ok(())
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6411,7 +6458,7 @@ where
        }
 
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-               let (htlcs_to_fail, res) = {
+               let htlcs_to_fail = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
@@ -6430,13 +6477,13 @@ where
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
                                                        chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
-                                               let res = if let Some(monitor_update) = monitor_update_opt {
+                                               if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
                                                        handle_new_monitor_update!(self, funding_txo, monitor_update,
-                                                               peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-                                               } else { Ok(()) };
-                                               (htlcs_to_fail, res)
+                                                               peer_state_lock, peer_state, per_peer_state, chan);
+                                               }
+                                               htlcs_to_fail
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                        "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
@@ -6446,7 +6493,7 @@ where
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
-               res
+               Ok(())
        }
 
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
@@ -6507,19 +6554,19 @@ where
                Ok(())
        }
 
-       /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
+       /// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
        fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
                let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
                        Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
                        None => {
                                // It's not a local channel
-                               return Ok(NotifyOption::SkipPersist)
+                               return Ok(NotifyOption::SkipPersistNoEvents)
                        }
                };
                let per_peer_state = self.per_peer_state.read().unwrap();
                let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
                if peer_state_mutex_opt.is_none() {
-                       return Ok(NotifyOption::SkipPersist)
+                       return Ok(NotifyOption::SkipPersistNoEvents)
                }
                let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
                let peer_state = &mut *peer_state_lock;
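
`SkipPersist` is renamed to `SkipPersistNoEvents` here, and the patch adds a third option, `SkipPersistHandleEvents`, so callers can distinguish "nothing happened at all" from "no persist needed, but wake the event processor". An illustrative mirror of the crate-internal enum as it is used in the handlers below:

```rust
// Illustrative mirror only; the real enum is defined earlier in channelmanager.rs.
enum NotifyOption {
	/// Serialized ChannelManager state changed - write it out and wake the
	/// background processor.
	DoPersist,
	/// Nothing needs persisting, but messages or events may have been queued,
	/// so the background processor should still be woken.
	SkipPersistHandleEvents,
	/// Nothing changed at all; neither persist nor wake anyone.
	SkipPersistNoEvents,
}
```
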
@@ -6531,14 +6578,14 @@ where
                                                        // If the announcement is about a channel of ours which is public, some
                                                        // other peer may simply be forwarding all its gossip to us. Don't provide
                                                        // a scary-looking error message and return Ok instead.
-                                                       return Ok(NotifyOption::SkipPersist);
+                                                       return Ok(NotifyOption::SkipPersistNoEvents);
                                                }
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
                                        }
                                        let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
                                        let msg_from_node_one = msg.contents.flags & 1 == 0;
                                        if were_node_one == msg_from_node_one {
-                                               return Ok(NotifyOption::SkipPersist);
+                                               return Ok(NotifyOption::SkipPersistNoEvents);
                                        } else {
                                                log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
                                                try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
@@ -6548,12 +6595,12 @@ where
                                                "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
                                }
                        },
-                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+                       hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
                }
                Ok(NotifyOption::DoPersist)
        }
 
-       fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
+       fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
                let htlc_forwards;
                let need_lnd_workaround = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6609,14 +6656,16 @@ where
                        }
                };
 
+               let mut persist = NotifyOption::SkipPersistHandleEvents;
                if let Some(forwards) = htlc_forwards {
                        self.forward_htlcs(&mut [forwards][..]);
+                       persist = NotifyOption::DoPersist;
                }
 
                if let Some(channel_ready_msg) = need_lnd_workaround {
                        self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
                }
-               Ok(())
+               Ok(persist)
        }
 
        /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
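
With that distinction available, `internal_channel_reestablish` now returns a `NotifyOption` rather than `()`: a reconnecting peer only forces a persist if HTLCs were actually forwarded, since only that changes serialized state. Reusing the `NotifyOption` names sketched above, the decision amounts to:

```rust
// Minimal model of the return value added above.
enum NotifyOption { DoPersist, SkipPersistHandleEvents }

fn reestablish_notify(forwarded_htlcs: bool) -> NotifyOption {
	if forwarded_htlcs {
		// forward_htlcs() queues state that is serialized with the manager.
		NotifyOption::DoPersist
	} else {
		// Only reestablish responses / events were queued for the peer.
		NotifyOption::SkipPersistHandleEvents
	}
}
```
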
@@ -6640,8 +6689,7 @@ where
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
-                                       MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
-                                       MonitorEvent::UpdateFailed(funding_outpoint) => {
+                                       MonitorEvent::HolderForceClosed(funding_outpoint) => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
@@ -6665,12 +6713,7 @@ where
                                                                                                msg: update
                                                                                        });
                                                                                }
-                                                                               let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
-                                                                                       ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
-                                                                               } else {
-                                                                                       ClosureReason::CommitmentTxConfirmed
-                                                                               };
-                                                                               self.issue_channel_close_events(&chan.context, reason);
+                                                                               self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::SendErrorMessage {
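
The two hunks above fold `MonitorEvent::CommitmentTxConfirmed` and `MonitorEvent::UpdateFailed` into a single `MonitorEvent::HolderForceClosed`: with monitor persist failures no longer reported back as events, the manager only needs one "our commitment transaction went on chain" signal, closed with `ClosureReason::HolderForceClosed`. A rough, illustrative sketch of the resulting event handling (payloads and cleanup elided):

```rust
use lightning::chain::channelmonitor::MonitorEvent;

// Illustrative only; the real handler also resolves the counterparty node and
// cleans up per-peer state as shown above.
fn handle_monitor_event(monitor_event: MonitorEvent) {
	match monitor_event {
		MonitorEvent::HTLCEvent(htlc_update) => {
			// Claim or fail the HTLC backwards depending on htlc_update.
			let _ = htlc_update;
		},
		MonitorEvent::HolderForceClosed(funding_outpoint) => {
			// Force-close bookkeeping, recorded as ClosureReason::HolderForceClosed.
			let _ = funding_outpoint;
		},
		MonitorEvent::Completed { .. } => {
			// An asynchronous monitor persist finished; unblock the channel.
		},
		_ => {
			// Any other variants are ignored in this sketch.
		},
	}
}
```
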
@@ -6711,7 +6754,6 @@ where
        fn check_free_holding_cells(&self) -> bool {
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
-               let mut handle_errors = Vec::new();
 
                // Walk our list of channels and find any that need to update. Note that when we do find an
                // update, if it includes actions that must be taken afterwards, we have to drop the
@@ -6736,13 +6778,8 @@ where
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
 
-                                                       let channel_id: ChannelId = *channel_id;
-                                                       let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
-                                                               peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
-                                                               peer_state.channel_by_id.remove(&channel_id));
-                                                       if res.is_err() {
-                                                               handle_errors.push((counterparty_node_id, res));
-                                                       }
+                                                       handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+                                                               peer_state_lock, peer_state, per_peer_state, chan);
                                                        continue 'peer_loop;
                                                }
                                        }
@@ -6752,15 +6789,11 @@ where
                        break 'peer_loop;
                }
 
-               let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+               let has_update = has_monitor_update || !failed_htlcs.is_empty();
                for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
                        self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
                }
 
-               for (counterparty_node_id, err) in handle_errors.drain(..) {
-                       let _ = handle_error!(self, err, counterparty_node_id);
-               }
-
                has_update
        }
 
@@ -7055,7 +7088,6 @@ where
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
-               let mut errors = Vec::new();
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
@@ -7087,11 +7119,8 @@ where
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_funding_outpoint.to_channel_id());
-                                                       if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-                                                               peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
-                                                       {
-                                                               errors.push((e, counterparty_node_id));
-                                                       }
+                                                       handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+                                                               peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
                                                                // If there are more `ChannelMonitorUpdate`s to process, restart at the
                                                                // top of the loop.
@@ -7110,10 +7139,6 @@ where
                        }
                        break;
                }
-               for (err, counterparty_node_id) in errors {
-                       let res = Err::<(), _>(err);
-                       let _ = handle_error!(self, res, counterparty_node_id);
-               }
        }
 
        fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
@@ -7166,8 +7191,8 @@ where
        /// the `MessageSendEvent`s to the specific peer they were generated under.
        fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
                let events = RefCell::new(Vec::new());
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let mut result = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       let mut result = NotifyOption::SkipPersistNoEvents;
 
                        // TODO: This behavior should be documented. It's unintuitive that we query
                        // ChannelMonitors when clearing other events.
@@ -7248,8 +7273,9 @@ where
        }
 
        fn block_disconnected(&self, header: &BlockHeader, height: u32) {
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                let new_height = height - 1;
                {
                        let mut best_block = self.best_block.write().unwrap();
@@ -7283,8 +7309,9 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
                        .map(|(a, b)| (a, Vec::new(), b)));
 
@@ -7303,8 +7330,9 @@ where
                let block_hash = header.block_hash();
                log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
 
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
 
                self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
@@ -7347,8 +7375,9 @@ where
        }
 
        fn transaction_unconfirmed(&self, txid: &Txid) {
-               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
-                       &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+               let _persistence_guard =
+                       PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+                               self, || -> NotifyOption { NotifyOption::DoPersist });
                self.do_chain_event(None, |channel| {
                        if let Some(funding_txo) = channel.context.get_funding_txo() {
                                if funding_txo.txid == *txid {
@@ -7531,18 +7560,26 @@ where
                }
        }
 
-       /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
+       /// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
+       /// may have events that need processing.
+       ///
+       /// In order to check if this [`ChannelManager`] needs persisting, call
+       /// [`Self::get_and_clear_needs_persistence`].
        ///
        /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
        /// [`ChannelManager`] and should instead register actions to be taken later.
-       ///
-       pub fn get_persistable_update_future(&self) -> Future {
-               self.persistence_notifier.get_future()
+       pub fn get_event_or_persistence_needed_future(&self) -> Future {
+               self.event_persist_notifier.get_future()
+       }
+
+       /// Returns true if this [`ChannelManager`] needs to be persisted.
+       pub fn get_and_clear_needs_persistence(&self) -> bool {
+               self.needs_persist_flag.swap(false, Ordering::AcqRel)
        }
 
        #[cfg(any(test, feature = "_test_utils"))]
-       pub fn get_persistence_condvar_value(&self) -> bool {
-               self.persistence_notifier.notify_pending()
+       pub fn get_event_or_persist_condvar_value(&self) -> bool {
+               self.event_persist_notifier.notify_pending()
        }
 
        /// Gets the latest best block which was connected either via the [`chain::Listen`] or
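
Together, `get_event_or_persistence_needed_future` and `get_and_clear_needs_persistence` let a driver loop wake on either condition and then check whether a write is actually required. A hedged sketch of such a loop - the `event_handler` and `persister` objects are assumptions here, not part of this patch:

```rust
// Not lightning-background-processor's actual code; illustrative only.
// `persister.persist_manager` is assumed to write the manager somewhere durable.
loop {
	// Completes when events may need processing or a persist may be needed.
	channel_manager.get_event_or_persistence_needed_future().wait();

	// Handle any queued events first...
	channel_manager.process_pending_events(&event_handler);

	// ...then persist only if something serialized actually changed. Note
	// that this call clears the flag, so the caller must follow through.
	if channel_manager.get_and_clear_needs_persistence() {
		persister.persist_manager(&channel_manager).expect("manager persistence failed");
	}
}
```
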
@@ -7599,8 +7636,21 @@ where
        L::Target: Logger,
 {
        fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // open_channel message - pre-funded channels are never written so there should be no
+               // change to the contents.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_open_channel(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => {
+                                       debug_assert!(false, "We shouldn't close a new channel");
+                                       NotifyOption::DoPersist
+                               },
+                               _ => NotifyOption::SkipPersistHandleEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
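
The `handle_open_channel` rewrite above is the template for most of the message handlers that follow: run the internal handler, then pick a `NotifyOption` based on whether the error (if any) closes a channel, since only a channel closure mutates state the `ChannelManager` serializes. Modelled as a standalone function with stand-in types:

```rust
// NotifyOption mirrors the enum sketched earlier; MsgError stands in for the
// crate-internal MsgHandleErrInternal and its new closes_channel() helper.
enum NotifyOption { DoPersist, SkipPersistHandleEvents, SkipPersistNoEvents }
struct MsgError { closes_channel: bool }

fn persist_choice(res: &Result<(), MsgError>) -> NotifyOption {
	match res {
		// Closing a channel changes serialized state, so force a write.
		Err(e) if e.closes_channel => NotifyOption::DoPersist,
		// Other errors may queue an error message to send, but nothing that
		// must survive a restart.
		Err(_) => NotifyOption::SkipPersistHandleEvents,
		// Success changes nothing serialized for these messages; the
		// commitment_signed that follows triggers the actual persist.
		Ok(()) => NotifyOption::SkipPersistNoEvents,
	}
}
```
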
@@ -7610,8 +7660,13 @@ where
        }
 
        fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // accept_channel message - pre-funded channels are never written so there should be no
+               // change to the contents.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+                       NotifyOption::SkipPersistHandleEvents
+               });
        }
 
        fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
@@ -7631,8 +7686,19 @@ where
        }
 
        fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // channel_ready message - while the channel's state will change, any channel_ready message
+               // will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated, so we
+               // will not force-close the channel on startup.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_channel_ready(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               _ => NotifyOption::SkipPersistHandleEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
@@ -7646,8 +7712,19 @@ where
        }
 
        fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_add_htlc message - the message itself doesn't change our channel state, only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_add_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
@@ -7656,13 +7733,35 @@ where
        }
 
        fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fail_htlc message - the message itself doesn't change our channel state, only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fail_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fail_malformed_htlc message - the message itself doesn't change our channel state,
+               // only the `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fail_malformed_htlc(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
@@ -7676,8 +7775,19 @@ where
        }
 
        fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
+               // Note that we never need to persist the updated ChannelManager for an inbound
+               // update_fee message - the message itself doesn't change our channel state, only the
+               // `commitment_signed` message afterwards will.
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_update_fee(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(()) => NotifyOption::SkipPersistNoEvents,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
@@ -7686,23 +7796,32 @@ where
        }
 
        fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
-               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
-                       let force_persist = self.process_background_events();
+               PersistenceNotifierGuard::optionally_notify(self, || {
                        if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
-                               if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
+                               persist
                        } else {
-                               NotifyOption::SkipPersist
+                               NotifyOption::DoPersist
                        }
                });
        }
 
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-               let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+                       let res = self.internal_channel_reestablish(counterparty_node_id, msg);
+                       let persist = match &res {
+                               Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+                               Err(_) => NotifyOption::SkipPersistHandleEvents,
+                               Ok(persist) => *persist,
+                       };
+                       let _ = handle_error!(self, res, *counterparty_node_id);
+                       persist
+               });
        }
 
        fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
+                       self, || NotifyOption::SkipPersistHandleEvents);
+
                let mut failed_channels = Vec::new();
                let mut per_peer_state = self.per_peer_state.write().unwrap();
                let remove_peer = {
@@ -7801,76 +7920,82 @@ where
                        return Err(());
                }
 
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+               let mut res = Ok(());
 
-               // If we have too many peers connected which don't have funded channels, disconnect the
-               // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
-               // unfunded channels taking up space in memory for disconnected peers, we still let new
-               // peers connect, but we'll reject new channels from them.
-               let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
-               let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+               PersistenceNotifierGuard::optionally_notify(self, || {
+                       // If we have too many peers connected which don't have funded channels, disconnect the
+                       // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+                       // unfunded channels taking up space in memory for disconnected peers, we still let new
+                       // peers connect, but we'll reject new channels from them.
+                       let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+                       let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
 
-               {
-                       let mut peer_state_lock = self.per_peer_state.write().unwrap();
-                       match peer_state_lock.entry(counterparty_node_id.clone()) {
-                               hash_map::Entry::Vacant(e) => {
-                                       if inbound_peer_limited {
-                                               return Err(());
-                                       }
-                                       e.insert(Mutex::new(PeerState {
-                                               channel_by_id: HashMap::new(),
-                                               inbound_channel_request_by_id: HashMap::new(),
-                                               latest_features: init_msg.features.clone(),
-                                               pending_msg_events: Vec::new(),
-                                               in_flight_monitor_updates: BTreeMap::new(),
-                                               monitor_update_blocked_actions: BTreeMap::new(),
-                                               actions_blocking_raa_monitor_updates: BTreeMap::new(),
-                                               is_connected: true,
-                                       }));
-                               },
-                               hash_map::Entry::Occupied(e) => {
-                                       let mut peer_state = e.get().lock().unwrap();
-                                       peer_state.latest_features = init_msg.features.clone();
-
-                                       let best_block_height = self.best_block.read().unwrap().height();
-                                       if inbound_peer_limited &&
-                                               Self::unfunded_channel_count(&*peer_state, best_block_height) ==
-                                               peer_state.channel_by_id.len()
-                                       {
-                                               return Err(());
-                                       }
+                       {
+                               let mut peer_state_lock = self.per_peer_state.write().unwrap();
+                               match peer_state_lock.entry(counterparty_node_id.clone()) {
+                                       hash_map::Entry::Vacant(e) => {
+                                               if inbound_peer_limited {
+                                                       res = Err(());
+                                                       return NotifyOption::SkipPersistNoEvents;
+                                               }
+                                               e.insert(Mutex::new(PeerState {
+                                                       channel_by_id: HashMap::new(),
+                                                       inbound_channel_request_by_id: HashMap::new(),
+                                                       latest_features: init_msg.features.clone(),
+                                                       pending_msg_events: Vec::new(),
+                                                       in_flight_monitor_updates: BTreeMap::new(),
+                                                       monitor_update_blocked_actions: BTreeMap::new(),
+                                                       actions_blocking_raa_monitor_updates: BTreeMap::new(),
+                                                       is_connected: true,
+                                               }));
+                                       },
+                                       hash_map::Entry::Occupied(e) => {
+                                               let mut peer_state = e.get().lock().unwrap();
+                                               peer_state.latest_features = init_msg.features.clone();
+
+                                               let best_block_height = self.best_block.read().unwrap().height();
+                                               if inbound_peer_limited &&
+                                                       Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+                                                       peer_state.channel_by_id.len()
+                                               {
+                                                       res = Err(());
+                                                       return NotifyOption::SkipPersistNoEvents;
+                                               }
 
-                                       debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
-                                       peer_state.is_connected = true;
-                               },
+                                               debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+                                               peer_state.is_connected = true;
+                                       },
+                               }
                        }
-               }
 
-               log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+                       log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
-               let per_peer_state = self.per_peer_state.read().unwrap();
-               if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                       let peer_state = &mut *peer_state_lock;
-                       let pending_msg_events = &mut peer_state.pending_msg_events;
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               let peer_state = &mut *peer_state_lock;
+                               let pending_msg_events = &mut peer_state.pending_msg_events;
 
-                       peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
-                               if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
-                                       // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
-                                       // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
-                                       // worry about closing and removing them.
-                                       debug_assert!(false);
-                                       None
-                               }
-                       ).for_each(|chan| {
-                               pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-                                       node_id: chan.context.get_counterparty_node_id(),
-                                       msg: chan.get_channel_reestablish(&self.logger),
+                               peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+                                       if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+                                               // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+                                               // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+                                               // worry about closing and removing them.
+                                               debug_assert!(false);
+                                               None
+                                       }
+                               ).for_each(|chan| {
+                                       pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+                                               node_id: chan.context.get_counterparty_node_id(),
+                                               msg: chan.get_channel_reestablish(&self.logger),
+                                       });
                                });
-                       });
-               }
-               //TODO: Also re-broadcast announcement_signatures
-               Ok(())
+                       }
+
+                       return NotifyOption::SkipPersistHandleEvents;
+                       //TODO: Also re-broadcast announcement_signatures
+               });
+               res
        }
 
        fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
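The reconnect path above no longer persists unconditionally: rejecting an over-limit inbound peer returns `NotifyOption::SkipPersistNoEvents`, while successfully queueing `channel_reestablish` messages returns `NotifyOption::SkipPersistHandleEvents`. The wrapper that consumes that value lives elsewhere in this file; the stand-alone sketch below only shows the shape. The enum paraphrases the crate-internal one (`DoPersist` is assumed from context; only the two `SkipPersist*` variants appear in this hunk), and `Notifier` here is a local stand-in, not lightning's waker type.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Condvar, Mutex};

// Paraphrase of the crate-internal tri-state returned by message handlers.
enum NotifyOption {
	DoPersist,               // something changed: persist and wake waiters
	SkipPersistHandleEvents, // no write needed, but wake so events get processed
	SkipPersistNoEvents,     // nothing happened worth waking anyone for
}

// Local stand-in for the waker the manager holds; just flips a flag under a lock.
struct Notifier { pending: Mutex<bool>, cv: Condvar }
impl Notifier {
	fn notify(&self) {
		*self.pending.lock().unwrap() = true;
		self.cv.notify_all();
	}
}

struct Signals {
	needs_persist_flag: AtomicBool,
	event_persist_notifier: Notifier,
}

// Run a handler and translate its NotifyOption into "set the persist flag"
// and/or "wake anyone waiting for events or persistence".
fn optionally_notify<F: FnOnce() -> NotifyOption>(signals: &Signals, handler: F) {
	match handler() {
		NotifyOption::DoPersist => {
			signals.needs_persist_flag.store(true, Ordering::Release);
			signals.event_persist_notifier.notify();
		},
		NotifyOption::SkipPersistHandleEvents => signals.event_persist_notifier.notify(),
		NotifyOption::SkipPersistNoEvents => {},
	}
}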
@@ -9714,7 +9839,9 @@ where
                        pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
                        background_events_processed_since_startup: AtomicBool::new(false),
-                       persistence_notifier: Notifier::new(),
+
+                       event_persist_notifier: Notifier::new(),
+                       needs_persist_flag: AtomicBool::new(false),
 
                        entropy_source: args.entropy_source,
                        node_signer: args.node_signer,
@@ -9776,9 +9903,9 @@ mod tests {
 
                // All nodes start with a persistable update pending as `create_network` connects each node
                // with all other nodes to make most tests simpler.
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
@@ -9792,19 +9919,19 @@ mod tests {
                        &nodes[0].node.get_our_node_id()).pop().unwrap();
 
                // The first two nodes (which opened a channel) should now require fresh persistence
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                // ... but the last node should not.
-               assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
                // After persisting the first two nodes they should no longer need fresh persistence.
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
                // about the channel.
                nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
                nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
-               assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // The nodes which are a party to the channel should also ignore messages from unrelated
                // parties.
@@ -9812,8 +9939,8 @@ mod tests {
                nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
                nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
                nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
 
                // At this point the channel info given by peers should still be the same.
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
@@ -9830,8 +9957,8 @@ mod tests {
                // persisted and that its channel info remains the same.
                nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
                nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
-               assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
 
@@ -9839,8 +9966,8 @@ mod tests {
                // the channel info has updated.
                nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
                nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
-               assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
-               assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+               assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+               assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
                assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
                assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
        }
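The rename reflects that this future now completes either when events are pending or when a persistence-relevant change has occurred. Below is a minimal sketch of how a caller might drive it, assuming the returned future can be `.await`ed and that a `get_and_clear_needs_persistence()` accessor pairs with the new flag (as the background processor does); shutdown and error handling are omitted.

use lightning::events::{Event, EventsProvider};
use lightning::ln::channelmanager::AChannelManager;

async fn drive<CM: AChannelManager>(cm: &CM, persist: impl Fn(&CM)) {
	loop {
		// Completes for pending events *or* persistence-relevant changes.
		cm.get_cm().get_event_or_persistence_needed_future().await;
		// Surface queued events first; handling them may itself dirty the manager.
		cm.get_cm().process_pending_events(&|_event: Event| { /* hand off to the user */ });
		// Only write the manager out if something persistence-relevant happened.
		if cm.get_cm().get_and_clear_needs_persistence() {
			persist(cm);
		}
	}
}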
@@ -9999,7 +10126,7 @@ mod tests {
                        TEST_FINAL_CLTV, false), 100_000);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
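In these test hunks the score-parameters argument to `find_route` changes from `&()` to `&Default::default()`, letting the call sites track whatever concrete `ScoreParams` type the test scorer now declares instead of hard-coding the unit type. Spelled out under the assumption that those parameters are the probabilistic scorer's fee parameters (illustrative only):

use lightning::routing::scoring::ProbabilisticScoringFeeParameters;

fn default_score_params() -> ProbabilisticScoringFeeParameters {
	// What `&Default::default()` resolves to at these call sites if the test
	// scorer's `ScoreParams` associated type is ProbabilisticScoringFeeParameters
	// (an assumption); a reference to this value goes where `&()` used to.
	Default::default()
}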
@@ -10033,7 +10160,7 @@ mod tests {
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
@@ -10090,7 +10217,7 @@ mod tests {
                );
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_id_2 = PaymentId([45; 32]);
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
@@ -10141,7 +10268,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);
@@ -10186,7 +10313,7 @@ mod tests {
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
 
                let test_preimage = PaymentPreimage([42; 32]);