Merge pull request #2006 from TheBlueMatt/2023-02-no-recursive-read-locks (tag: v0.0.114-beta)
author    Wilmer Paulino <9447167+wpaulino@users.noreply.github.com>
          Tue, 28 Feb 2023 08:24:16 +0000 (00:24 -0800)
committer GitHub <noreply@github.com>
          Tue, 28 Feb 2023 08:24:16 +0000 (00:24 -0800)
Refuse recursive read locks

lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs

index 80cea29fd632d47ee2e38f93df128b3fa25c50ce,1045e77ad04f32a22d00263e33cbdb83b2adc77e..0757e117ce2e7660648856053070c12773b2b6b1
@@@ -1417,7 -1417,7 +1417,7 @@@ macro_rules! emit_channel_ready_event 
  }
  
  macro_rules! handle_monitor_update_completion {
-       ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+       ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let mut updates = $chan.monitor_updating_restored(&$self.logger,
                        &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
                        $self.best_block.read().unwrap().height());
  
                let channel_id = $chan.channel_id();
                core::mem::drop($peer_state_lock);
+               core::mem::drop($per_peer_state_lock);
  
                $self.handle_monitor_update_completion_actions(update_actions);
  
  }
  
  macro_rules! handle_new_monitor_update {
-       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
+       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
                // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
                // any case so that it won't deadlock.
                debug_assert!($self.id_to_peer.try_lock().is_ok());
                                        .update_id == $update_id) &&
                                        $chan.get_latest_monitor_update_id() == $update_id
                                {
-                                       handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
+                                       handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
                                }
                                Ok(())
                        },
                }
        } };
-       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
-               handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+       ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
+               handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
        }
  }
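
The `handle_new_monitor_update!`/`handle_monitor_update_completion!` macros now receive the top-level `per_peer_state` read guard so it can be dropped, together with the peer-state mutex guard, before completion actions run. This is the "recursive read lock" the PR title refuses: with `std::sync::RwLock`, a second `read()` on the same thread can deadlock if a writer queues between the two acquisitions (the lock's fairness policy is platform-defined). A minimal standalone sketch of the hazard and the drop-first fix, not LDK code:

    use std::sync::RwLock;

    fn completion_actions(state: &RwLock<u64>) {
        // If the caller still held its read guard and a writer were queued,
        // this second read() could block forever on writer-priority locks.
        let _reentrant = state.read().unwrap();
    }

    fn handle_update(state: &RwLock<u64>) {
        let guard = state.read().unwrap();
        let _snapshot = *guard;
        drop(guard); // release the outer read lock first, as the macros now do
        completion_actions(state);
    }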
  
@@@ -1835,7 -1836,7 +1836,7 @@@ wher
                                        if let Some(monitor_update) = monitor_update_opt.take() {
                                                let update_id = monitor_update.update_id;
                                                let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
                                        }
  
                                        if chan_entry.get().is_shutdown() {
                })
        }
  
-       // Only public for testing, this should otherwise never be called direcly
-       pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+       #[cfg(test)]
+       pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+               let _lck = self.total_consistency_lock.read().unwrap();
+               self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
+       }
+       fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
+               // The top-level caller should hold the total_consistency_lock read lock.
+               debug_assert!(self.total_consistency_lock.try_write().is_err());
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
                let prng_seed = self.entropy_source.get_secure_random_bytes();
                let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
                }
                let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
  
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                let err: Result<(), _> = loop {
                        let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
                                None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                        Some(monitor_update) => {
                                                let update_id = monitor_update.update_id;
                                                let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
-                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan) {
+                                               if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
                                                        break Err(e);
                                                }
                                                if update_res == ChannelMonitorUpdateStatus::InProgress {
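
`send_payment_along_path` is now an internal helper that asserts, rather than acquires, `total_consistency_lock`; only the `#[cfg(test)]` wrapper and the public entry points take the read lock. The `debug_assert!(try_write().is_err())` idiom works because `try_write()` fails while any read or write guard is outstanding, though it cannot prove the *calling* thread is the holder. A hedged sketch of the idiom with illustrative names:

    use std::sync::RwLock;

    struct Manager { total_consistency_lock: RwLock<()> }

    impl Manager {
        fn send_internal(&self) {
            // Cheap debug-build check that some caller took the lock for us;
            // compiled out entirely in release builds.
            debug_assert!(self.total_consistency_lock.try_write().is_err());
        }
        fn send_public(&self) {
            let _read = self.total_consistency_lock.read().unwrap();
            self.send_internal();
        }
    }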
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
        pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments
                        .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
                                |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
        /// `route_params` and retry failed payment paths based on `retry_strategy`.
        pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments
                        .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
                                &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
        #[cfg(test)]
        fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
                        |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
                        self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
        /// [`send_payment`]: Self::send_payment
        pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments.send_spontaneous_payment_with_route(
                        route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
                        best_block_height,
        /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
        pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
                        retry_strategy, route_params, &self.router, self.list_usable_channels(),
                        || self.compute_inflight_htlcs(),  &self.entropy_source, &self.node_signer, best_block_height,
        /// us to easily discern them from real payments.
        pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
                let best_block_height = self.best_block.read().unwrap().height();
+               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
                self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
                        |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
                        self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
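
Each public send path above now takes the `PersistenceNotifierGuard` itself, so the shared `send_payment_along_path` helper never re-acquires `total_consistency_lock`. The guard is an RAII holder of the read lock that wakes the background persister when dropped. A minimal sketch of that notify-on-drop shape; `Notifier` and `NotifyOnDrop` here are illustrative, not LDK's exact types:

    use std::sync::{Condvar, Mutex, RwLock, RwLockReadGuard};

    struct Notifier { flag: Mutex<bool>, cv: Condvar }

    struct NotifyOnDrop<'a> { _read: RwLockReadGuard<'a, ()>, notifier: &'a Notifier }

    impl<'a> Drop for NotifyOnDrop<'a> {
        fn drop(&mut self) {
            // Releasing the read lock and waking the persister happen together.
            *self.notifier.flag.lock().unwrap() = true;
            self.notifier.cv.notify_all();
        }
    }

    fn notify_on_drop<'a>(lock: &'a RwLock<()>, notifier: &'a Notifier) -> NotifyOnDrop<'a> {
        NotifyOnDrop { _read: lock.read().unwrap(), notifier }
    }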
                        )
                ).unwrap_or(None);
  
-               if let Some(mut peer_state_lock) = peer_state_opt.take() {
+               if peer_state_opt.is_some() {
+                       let mut peer_state_lock = peer_state_opt.unwrap();
                        let peer_state = &mut *peer_state_lock;
                        if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
                                let counterparty_node_id = chan.get().get_counterparty_node_id();
                                        let update_id = monitor_update.update_id;
                                        let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
                                        let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-                                               peer_state, chan);
+                                               peer_state, per_peer_state, chan);
                                        if let Err(e) = res {
                                                // TODO: This is a *critical* error - we probably updated the outbound edge
                                                // of the HTLC's monitor with a preimage. We should retry this monitor
        }
  
        fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
-               let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+               debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
  
                let counterparty_node_id = match counterparty_node_id {
                        Some(cp_id) => cp_id.clone(),
                if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
                        return;
                }
-               handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
+               handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
        }
  
        /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
                                let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
  
                                let chan = e.insert(chan);
-                               let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
+                               let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
+                                       per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
  
                                // Note that we reply with the new channel_id in error messages if we gave up on the
                                // channel, not the temporary_channel_id. This is compatible with ourselves, but the
                                let monitor = try_chan_entry!(self,
                                        chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
                                let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
-                               let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, chan);
+                               let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
                                if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
                                        // We weren't able to watch the channel to begin with, so no updates should be made on
                                        // it. Previously, full_stack_target found an (unreachable) panic when the
                                        if let Some(monitor_update) = monitor_update_opt {
                                                let update_id = monitor_update.update_id;
                                                let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
-                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
+                                               break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
                                        }
                                        break Ok(());
                                },
                                let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
                                let update_id = monitor_update.update_id;
                                handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-                                       peer_state, chan)
+                                       peer_state, per_peer_state, chan)
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                }
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
                let (htlcs_to_fail, res) = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
-                       let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+                       let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                        debug_assert!(false);
                                        MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
-                               })?;
-                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                               }).map(|mtx| mtx.lock().unwrap())?;
                        let peer_state = &mut *peer_state_lock;
                        match peer_state.channel_by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
                                        let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
                                        let update_id = monitor_update.update_id;
-                                       let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-                                               peer_state, chan);
+                                       let res = handle_new_monitor_update!(self, update_res, update_id,
+                                               peer_state_lock, peer_state, per_peer_state, chan);
                                        (htlcs_to_fail, res)
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
  
        /// Process pending events from the `chain::Watch`, returning whether any events were processed.
        fn process_pending_monitor_events(&self) -> bool {
+               debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
                let mut failed_channels = Vec::new();
                let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
                let has_pending_monitor_events = !pending_monitor_events.is_empty();
        /// update events as a separate process method here.
        #[cfg(fuzzing)]
        pub fn process_monitor_events(&self) {
-               self.process_pending_monitor_events();
+               PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+                       if self.process_pending_monitor_events() {
+                               NotifyOption::DoPersist
+                       } else {
+                               NotifyOption::SkipPersist
+                       }
+               });
        }
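
The fuzzing-only `process_monitor_events` now wraps the call in `PersistenceNotifierGuard::optionally_notify`, persisting only when monitor events were actually processed, while `process_pending_monitor_events` itself just asserts the caller already holds the read lock. A sketch of the optionally-notify flow, assuming an illustrative free-function signature:

    use std::sync::RwLock;

    enum NotifyOption { DoPersist, SkipPersist }

    // Run `f` under the consistency read lock and only wake the persister
    // when it reports that persistent state actually changed.
    fn optionally_notify<F: FnOnce() -> NotifyOption>(lock: &RwLock<()>, f: F) -> bool {
        let _read = lock.read().unwrap();
        match f() {
            NotifyOption::DoPersist => true,  // signal the persister here
            NotifyOption::SkipPersist => false,
        }
    }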
  
        /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
                let mut handle_errors = Vec::new();
-               let per_peer_state = self.per_peer_state.read().unwrap();
  
-               for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
-                       'chan_loop: loop {
-                               let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-                               let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-                               for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
-                                       let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let funding_txo = chan.get_funding_txo();
-                                       let (monitor_opt, holding_cell_failed_htlcs) =
-                                               chan.maybe_free_holding_cell_htlcs(&self.logger);
-                                       if !holding_cell_failed_htlcs.is_empty() {
-                                               failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
-                                       }
-                                       if let Some(monitor_update) = monitor_opt {
-                                               has_monitor_update = true;
-                                               let update_res = self.chain_monitor.update_channel(
-                                                       funding_txo.expect("channel is live"), monitor_update);
-                                               let update_id = monitor_update.update_id;
-                                               let channel_id: [u8; 32] = *channel_id;
-                                               let res = handle_new_monitor_update!(self, update_res, update_id,
-                                                       peer_state_lock, peer_state, chan, MANUALLY_REMOVING,
-                                                       peer_state.channel_by_id.remove(&channel_id));
-                                               if res.is_err() {
-                                                       handle_errors.push((counterparty_node_id, res));
+               // Walk our list of channels and find any that need to update. Note that when we do find an
+               // update, if it includes actions that must be taken afterwards, we have to drop the
+               // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
+               // manage to go through all our peers without finding a single channel to update.
+               'peer_loop: loop {
+                       let per_peer_state = self.per_peer_state.read().unwrap();
+                       for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+                               'chan_loop: loop {
+                                       let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+                                       let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
+                                       for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+                                               let counterparty_node_id = chan.get_counterparty_node_id();
+                                               let funding_txo = chan.get_funding_txo();
+                                               let (monitor_opt, holding_cell_failed_htlcs) =
+                                                       chan.maybe_free_holding_cell_htlcs(&self.logger);
+                                               if !holding_cell_failed_htlcs.is_empty() {
+                                                       failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
+                                               }
+                                               if let Some(monitor_update) = monitor_opt {
+                                                       has_monitor_update = true;
+                                                       let update_res = self.chain_monitor.update_channel(
+                                                               funding_txo.expect("channel is live"), monitor_update);
+                                                       let update_id = monitor_update.update_id;
+                                                       let channel_id: [u8; 32] = *channel_id;
+                                                       let res = handle_new_monitor_update!(self, update_res, update_id,
+                                                               peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
+                                                               peer_state.channel_by_id.remove(&channel_id));
+                                                       if res.is_err() {
+                                                               handle_errors.push((counterparty_node_id, res));
+                                                       }
+                                                       continue 'peer_loop;
                                                }
-                                               continue 'chan_loop;
                                        }
+                                       break 'chan_loop;
                                }
-                               break 'chan_loop;
                        }
+                       break 'peer_loop;
                }
  
                let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
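
As the new comment says, the holding-cell walk re-takes `per_peer_state` on every pass of `'peer_loop` and restarts from the top whenever any channel produced a monitor update, because handling that update drops both the peer-state guard and the top-level read guard, invalidating the iterators. A compact sketch of this restart-on-mutation pattern over illustrative types:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    fn handle_update(_update: u64) { /* may safely re-take the same locks */ }

    fn drain_updates(peers: &RwLock<HashMap<u64, Mutex<Vec<u64>>>>) {
        loop {
            let mut pending = None;
            {
                let map = peers.read().unwrap();
                for (_id, chans) in map.iter() {
                    let mut chans = chans.lock().unwrap();
                    if let Some(update) = chans.pop() {
                        pending = Some(update);
                        break;
                    }
                }
            } // both the read guard and the peer mutex guard end here
            match pending {
                Some(update) => handle_update(update), // then rescan from the top
                None => break, // a full pass found nothing left to do
            }
        }
    }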
@@@ -6745,36 -6774,27 +6774,36 @@@ impl Readable for HTLCSource 
                        0 => {
                                let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
                                let mut first_hop_htlc_msat: u64 = 0;
 -                              let mut path = Some(Vec::new());
 +                              let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
                                let mut payment_id = None;
                                let mut payment_secret = None;
 -                              let mut payment_params = None;
 +                              let mut payment_params: Option<PaymentParameters> = None;
                                read_tlv_fields!(reader, {
                                        (0, session_priv, required),
                                        (1, payment_id, option),
                                        (2, first_hop_htlc_msat, required),
                                        (3, payment_secret, option),
                                        (4, path, vec_type),
 -                                      (5, payment_params, option),
 +                                      (5, payment_params, (option: ReadableArgs, 0)),
                                });
                                if payment_id.is_none() {
                                        // For backwards compat, if there was no payment_id written, use the session_priv bytes
                                        // instead.
                                        payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
                                }
 +                              if path.is_none() || path.as_ref().unwrap().is_empty() {
 +                                      return Err(DecodeError::InvalidValue);
 +                              }
 +                              let path = path.unwrap();
 +                              if let Some(params) = payment_params.as_mut() {
 +                                      if params.final_cltv_expiry_delta == 0 {
 +                                              params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
 +                                      }
 +                              }
                                Ok(HTLCSource::OutboundRoute {
                                        session_priv: session_priv.0.unwrap(),
                                        first_hop_htlc_msat,
 -                                      path: path.unwrap(),
 +                                      path,
                                        payment_id: payment_id.unwrap(),
                                        payment_secret,
                                        payment_params,
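
Pre-0.0.114 serializations of `HTLCSource::OutboundRoute` carry a `PaymentParameters` without a meaningful `final_cltv_expiry_delta`, so the read path now rejects empty paths outright and backfills a zero delta from the path's last hop. A sketch of that backfill-on-read step, with illustrative struct definitions standing in for `RouteHop` and `PaymentParameters`:

    struct Hop { cltv_expiry_delta: u32 }
    struct Params { final_cltv_expiry_delta: u32 }

    fn backfill(params: &mut Option<Params>, path: &[Hop]) {
        if let Some(p) = params.as_mut() {
            if p.final_cltv_expiry_delta == 0 {
                // Safe: the caller has already rejected empty paths.
                p.final_cltv_expiry_delta =
                    path.last().expect("path checked non-empty").cltv_expiry_delta;
            }
        }
    }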
@@@ -6921,7 -6941,10 +6950,10 @@@ wher
                let mut monitor_update_blocked_actions_per_peer = None;
                let mut peer_states = Vec::new();
                for (_, peer_state_mutex) in per_peer_state.iter() {
-                       peer_states.push(peer_state_mutex.lock().unwrap());
+                       // Because we're holding the owning `per_peer_state` write lock here there's no chance
+                       // of a lockorder violation deadlock - no other thread can be holding any
+                       // per_peer_state lock at all.
+                       peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
                }
  
                (serializable_peer_count).write(writer)?;
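
Taking every peer mutex while holding the `per_peer_state` write lock is deadlock-free by construction: any other thread must acquire the read lock before touching a peer mutex, and the write lock excludes them all. `unsafe_well_ordered_double_lock_self` simply tells LDK's debug lockorder tracking the same thing. The same shape with plain std types:

    use std::collections::HashMap;
    use std::sync::{Mutex, MutexGuard, RwLock};

    fn serialize_all(per_peer_state: &RwLock<HashMap<u64, Mutex<String>>>) {
        // Holding write() means no other thread can hold any peer mutex,
        // so locking them all in arbitrary order cannot deadlock.
        let peers = per_peer_state.write().unwrap();
        let guards: Vec<MutexGuard<'_, String>> =
            peers.values().map(|m| m.lock().unwrap()).collect();
        for g in &guards {
            let _ = g.len(); // write each peer's state to the stream here
        }
    }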
@@@ -7509,7 -7532,10 +7541,10 @@@ wher
                        }
                }
  
-               let pending_outbounds = OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()) };
+               let pending_outbounds = OutboundPayments {
+                       pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+                       retry_lock: Mutex::new(())
+               };
                if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
                        // If we have pending HTLCs to forward, assume we either dropped a
                        // `PendingHTLCsForwardable` or the user received it but never processed it as they
@@@ -7850,7 -7876,7 +7885,7 @@@ mod tests 
                // indicates there are more HTLCs coming.
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
                let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
-               nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+               nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                expect_payment_failed!(nodes[0], our_payment_hash, true);
  
                // Send the second half of the original MPP payment.
-               nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+               nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
                        final_value_msat: 100_000,
 -                      final_cltv_expiry_delta: TEST_FINAL_CLTV,
                };
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
                        final_value_msat: 10_000,
 -                      final_cltv_expiry_delta: 40,
                };
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
                        final_value_msat: 10_000,
 -                      final_cltv_expiry_delta: 40,
                };
                let network_graph = nodes[0].network_graph.clone();
                let first_hops = nodes[0].node.list_usable_channels();
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
                        assert!(nodes_0_lock.contains_key(channel_id));
-                       assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
                }
  
+               assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
                let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
  
                nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
                        assert!(nodes_0_lock.contains_key(channel_id));
+               }
  
+               {
                        // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
                        // as it has the funding transaction.
                        let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
                        let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
                        assert_eq!(nodes_0_lock.len(), 1);
                        assert!(nodes_0_lock.contains_key(channel_id));
+               }
  
+               {
                        // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
                        // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
                        // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
index e16b20b897219fbd096550dddd7efcc603bcd7bf,36f10f742be8c4c808e868d445a39f5b2f88994e..454fbe2b7819889e3246314a90ac82eed17f40f9
@@@ -4083,7 -4083,7 +4083,7 @@@ fn do_test_htlc_timeout(send_partial_mp
                let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
                let payment_id = PaymentId([42; 32]);
                let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &route).unwrap();
-               nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+               nodes[0].node.test_send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
                check_added_monitors!(nodes[0], 1);
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
@@@ -8150,12 -8150,13 +8150,13 @@@ fn test_update_err_monitor_lockdown() 
        let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let persister = test_utils::TestPersister::new();
        let watchtower = {
-               let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-               let mut w = test_utils::TestVecWriter(Vec::new());
-               monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-               assert!(new_monitor == *monitor);
+               let new_monitor = {
+                       let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                                       &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+                       assert!(new_monitor == *monitor);
+                       new_monitor
+               };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
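
The watchtower tests now round-trip the live monitor through `encode()` and `read` inside a scoped block, so the borrow of `nodes[0]`'s chain monitor ends before the copy is handed to a fresh `TestChainMonitor`. The underlying check is a plain serialize/deserialize/compare; a generic illustrative helper (not LDK's API, which deserializes via `ReadableArgs` with the keys manager, as shown above):

    fn assert_roundtrip<T: PartialEq>(
        value: &T,
        encode: impl Fn(&T) -> Vec<u8>,
        decode: impl Fn(&[u8]) -> T,
    ) -> T {
        // Re-read the encoded bytes and insist the copy matches the original.
        let restored = decode(&encode(value));
        assert!(restored == *value);
        restored
    }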
@@@ -8217,12 -8218,13 +8218,13 @@@ fn test_concurrent_monitor_claim() 
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
        let persister = test_utils::TestPersister::new();
        let watchtower_alice = {
-               let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-               let mut w = test_utils::TestVecWriter(Vec::new());
-               monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-               assert!(new_monitor == *monitor);
+               let new_monitor = {
+                       let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                                       &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+                       assert!(new_monitor == *monitor);
+                       new_monitor
+               };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
        let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
        let persister = test_utils::TestPersister::new();
        let watchtower_bob = {
-               let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
-               let mut w = test_utils::TestVecWriter(Vec::new());
-               monitor.write(&mut w).unwrap();
-               let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
-               assert!(new_monitor == *monitor);
+               let new_monitor = {
+                       let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
+                       let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+                                       &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
+                       assert!(new_monitor == *monitor);
+                       new_monitor
+               };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                watchtower
@@@ -9141,20 -9144,20 +9144,20 @@@ fn test_inconsistent_mpp_params() 
                dup_route.paths.push(route.paths[1].clone());
                nodes[0].node.test_add_new_pending_payment(our_payment_hash, Some(our_payment_secret), payment_id, &dup_route).unwrap()
        };
-       {
-               nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
-               check_added_monitors!(nodes[0], 1);
+       nodes[0].node.test_send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
+       check_added_monitors!(nodes[0], 1);
  
+       {
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
        }
        assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
  
-       {
-               nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
-               check_added_monitors!(nodes[0], 1);
+       nodes[0].node.test_send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
+       check_added_monitors!(nodes[0], 1);
  
+       {
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                let payment_event = SendEvent::from_event(events.pop().unwrap());
  
        expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
  
-       nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
+       nodes[0].node.test_send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None, session_privs[2]).unwrap();
        check_added_monitors!(nodes[0], 1);
  
        let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@@ -9241,6 -9244,7 +9244,6 @@@ fn test_keysend_payments_to_public_node
        let route_params = RouteParameters {
                payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
                final_value_msat: 10000,
 -              final_cltv_expiry_delta: 40,
        };
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
@@@ -9271,6 -9275,7 +9274,6 @@@ fn test_keysend_payments_to_private_nod
        let route_params = RouteParameters {
                payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
                final_value_msat: 10000,
 -              final_cltv_expiry_delta: 40,
        };
        let network_graph = nodes[0].network_graph.clone();
        let first_hops = nodes[0].node.list_usable_channels();
index 72513087fda17edb527bac5aa141657df8a2e956,4dd1b9009763a3bdfae43e3944d9c30f29ea5dc4..c4cd0fc1b09d1ca691654c7fbc32e256bb39ed41
@@@ -98,6 -98,7 +98,6 @@@ fn mpp_retry() 
        let mut route_params = RouteParameters {
                payment_params: route.payment_params.clone().unwrap(),
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
@@@ -296,6 -297,7 +296,6 @@@ fn do_retry_with_no_persist(confirm_bef
        let route_params = RouteParameters {
                payment_params: route.payment_params.clone().unwrap(),
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
        nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@@ -1192,33 -1194,31 +1192,31 @@@ fn test_trivial_inflight_htlc_tracking(
        let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
  
        // Send and claim the payment. Inflight HTLCs should be empty.
-       let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 500000);
-       nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
-       check_added_monitors!(nodes[0], 1);
-       pass_along_route(&nodes[0], &[&vec!(&nodes[1], &nodes[2])[..]], 500000, payment_hash, payment_secret);
-       claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], payment_preimage);
+       let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut node_1_per_peer_lock;
-               let mut node_1_peer_state_lock;
                let channel_1 =  get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
  
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               assert_eq!(chan_1_used_liquidity, None);
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
  
-               assert_eq!(chan_1_used_liquidity, None);
                assert_eq!(chan_2_used_liquidity, None);
        }
        let pending_payments = nodes[0].node.list_recent_payments();
        }
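
The inflight-HTLC assertions are now split so each `get_channel_ref!` (and the per-peer guards it populates) lives in its own block: guards belonging to two different peers are never alive at once, which the stricter lockorder tracking would otherwise flag as a potential lock-order inversion. A minimal sketch of the scoping fix:

    use std::sync::Mutex;

    fn check(a: &Mutex<u32>, b: &Mutex<u32>) {
        { let g_a = a.lock().unwrap(); assert_eq!(*g_a, 0); } // g_a dropped at `}`
        { let g_b = b.lock().unwrap(); assert_eq!(*g_b, 0); } // never overlaps g_a
    }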
  
        // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
-       let (payment_preimage, payment_hash,  _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+       let (payment_preimage, payment_hash,  _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut node_1_per_peer_lock;
-               let mut node_1_peer_state_lock;
                let channel_1 =  get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
  
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               // First hop accounts for expected 1000 msat fee
+               assert_eq!(chan_1_used_liquidity, Some(501000));
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
  
-               // First hop accounts for expected 1000 msat fee
-               assert_eq!(chan_1_used_liquidity, Some(501000));
                assert_eq!(chan_2_used_liquidity, Some(500000));
        }
        let pending_payments = nodes[0].node.list_recent_payments();
                nodes[0].node.timer_tick_occurred();
        }
  
+       let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
        {
-               let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut node_1_per_peer_lock;
-               let mut node_1_peer_state_lock;
                let channel_1 =  get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
  
                let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
                        channel_1.get_short_channel_id().unwrap()
                );
+               assert_eq!(chan_1_used_liquidity, None);
+       }
+       {
+               let mut node_1_per_peer_lock;
+               let mut node_1_peer_state_lock;
+               let channel_2 =  get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
                let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
                        &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
                        &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
                        channel_2.get_short_channel_id().unwrap()
                );
-               assert_eq!(chan_1_used_liquidity, None);
                assert_eq!(chan_2_used_liquidity, None);
        }
  
@@@ -1385,12 -1388,12 +1386,12 @@@ fn do_test_intercepted_payment(test: In
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
        let route = get_route(
                &nodes[0].node.get_our_node_id(), &route_params.payment_params,
                &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
 -              route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
 +              route_params.payment_params.final_cltv_expiry_delta, nodes[0].logger, &scorer,
 +              &random_seed_bytes,
        ).unwrap();
  
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
@@@ -1575,6 -1578,7 +1576,6 @@@ fn do_automatic_retries(test: AutoRetry
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
        let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
  
@@@ -1784,6 -1788,7 +1785,6 @@@ fn auto_retry_partial_failure() 
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_2_id);
        nodes[0].router.expect_find_route(RouteParameters {
 -                      payment_params, final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV
 +                      payment_params, final_value_msat: amt_msat / 2,
                }, Ok(retry_1_route));
        let mut payment_params = route_params.payment_params.clone();
        payment_params.previously_failed_channels.push(chan_3_id);
        nodes[0].router.expect_find_route(RouteParameters {
 -                      payment_params, final_value_msat: amt_msat / 4, final_cltv_expiry_delta: TEST_FINAL_CLTV
 +                      payment_params, final_value_msat: amt_msat / 4,
                }, Ok(retry_2_route));
  
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
@@@ -1995,6 -2000,7 +1996,6 @@@ fn auto_retry_zero_attempts_send_error(
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
@@@ -2034,6 -2040,7 +2035,6 @@@ fn fails_paying_after_rejected_by_payee
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@@ -2080,6 -2087,7 +2081,6 @@@ fn retry_multi_path_single_failed_payme
        let route_params = RouteParameters {
                payment_params: payment_params.clone(),
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        let chans = nodes[0].node.list_usable_channels();
                        payment_params: pay_params,
                        // Note that the second request here requests the amount we originally failed to send,
                        // not the amount remaining on the full payment, which should be changed.
 -                      final_value_msat: 100_000_001, final_cltv_expiry_delta: TEST_FINAL_CLTV
 +                      final_value_msat: 100_000_001,
                }, Ok(route.clone()));
  
        {
@@@ -2173,6 -2181,7 +2174,6 @@@ fn immediate_retry_on_failure() 
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        let chans = nodes[0].node.list_usable_channels();
        pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
        nodes[0].router.expect_find_route(RouteParameters {
                        payment_params: pay_params, final_value_msat: amt_msat,
 -                      final_cltv_expiry_delta: TEST_FINAL_CLTV
                }, Ok(route.clone()));
  
        nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@@ -2261,6 -2271,7 +2262,6 @@@ fn no_extra_retries_on_back_to_back_fai
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        let mut route = Route {
        route.paths[0][1].fee_msat = amt_msat;
        nodes[0].router.expect_find_route(RouteParameters {
                        payment_params: second_payment_params,
 -                      final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV,
 +                      final_value_msat: amt_msat,
                }, Ok(route.clone()));
  
        nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@@ -2461,6 -2472,7 +2462,6 @@@ fn test_simple_partial_retry() 
        let route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        let mut route = Route {
        route.paths.remove(0);
        nodes[0].router.expect_find_route(RouteParameters {
                        payment_params: second_payment_params,
 -                      final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV,
 +                      final_value_msat: amt_msat / 2,
                }, Ok(route.clone()));
  
        nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@@ -2626,6 -2638,7 +2627,6 @@@ fn test_threaded_payment_retries() 
        let mut route_params = RouteParameters {
                payment_params,
                final_value_msat: amt_msat,
 -              final_cltv_expiry_delta: TEST_FINAL_CLTV,
        };
  
        let mut route = Route {