Merge pull request #2417 from tnull/2023-07-max-total-fee
author: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 26 Sep 2023 20:07:52 +0000 (20:07 +0000)
committer: GitHub <noreply@github.com>
Tue, 26 Sep 2023 20:07:52 +0000 (20:07 +0000)
Add config option to set maximum total routing fee

1  2 
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/outbound_payment.rs
lightning/src/ln/payment_tests.rs
lightning/src/routing/router.rs

index e2641d2cf16a7270faeb74887d7642f4cc40abeb,c76ca58f59fb9cecd319dcad9d49ddf67649e139..3ef57c5b87f024e8861ecabf7226c4f6eae19ad7
@@@ -923,14 -923,12 +923,14 @@@ wher
  /// called [`funding_transaction_generated`] for outbound channels) being closed.
  ///
  /// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
 -/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
 -/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates
 -/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
 -/// the serialization process). If the deserialized version is out-of-date compared to the
 -/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
 -/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
 +/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
 +/// [`ChannelMonitorUpdate`] before returning from
 +/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
 +/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
 +/// `ChannelManager` operations from occurring during the serialization process). If the
 +/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
 +/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
 +/// will be lost (modulo on-chain transaction fees).
  ///
  /// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
  /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
@@@ -2042,30 -2040,56 +2042,30 @@@ macro_rules! handle_monitor_update_comp
  }
  
  macro_rules! handle_new_monitor_update {
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
 -              // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 -              // any case so that it won't deadlock.
 -              debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
 +      ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
                match $update_res {
 +                      ChannelMonitorUpdateStatus::UnrecoverableError => {
 +                              let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
 +                              log_error!($self.logger, "{}", err_str);
 +                              panic!("{}", err_str);
 +                      },
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        &$chan.context.channel_id());
 -                              Ok(false)
 -                      },
 -                      ChannelMonitorUpdateStatus::PermanentFailure => {
 -                              log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 -                                      &$chan.context.channel_id());
 -                              update_maps_on_chan_removal!($self, &$chan.context);
 -                              let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 -                                      "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 -                                      $chan.context.get_user_id(), $chan.context.force_shutdown(false),
 -                                      $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
 -                              $remove;
 -                              res
 +                              false
                        },
                        ChannelMonitorUpdateStatus::Completed => {
                                $completed;
 -                              Ok(true)
 +                              true
                        },
                }
        } };
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
 -              handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
 -                      $per_peer_state_lock, $chan, _internal, $remove,
 +      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
 +              handle_new_monitor_update!($self, $update_res, $chan, _internal,
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
 -              if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
 -                      handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
 -                              $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
 -              } else {
 -                      // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
 -                      // update).
 -                      debug_assert!(false);
 -                      let channel_id = *$chan_entry.key();
 -                      let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
 -                              "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
 -                              $chan_entry.get_mut(), &channel_id);
 -                      $chan_entry.remove();
 -                      Err(err)
 -              }
 -      };
 -      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 +      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
                        .or_insert_with(Vec::new);
                // During startup, we push monitor updates as background events through to here in
                                in_flight_updates.len() - 1
                        });
                let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
 -              handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
 -                      $per_peer_state_lock, $chan, _internal, $remove,
 +              handle_new_monitor_update!($self, update_res, $chan, _internal,
                        {
                                let _ = in_flight_updates.remove(idx);
                                if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
                                }
                        })
        } };
 -      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 -              if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
 -                      handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
 -                              $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
 -              } else {
 -                      // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
 -                      // update).
 -                      debug_assert!(false);
 -                      let channel_id = *$chan_entry.key();
 -                      let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
 -                              "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
 -                              $chan_entry.get_mut(), &channel_id);
 -                      $chan_entry.remove();
 -                      Err(err)
 -              }
 -      }
  }
  
  macro_rules! process_events_body {
@@@ -2497,64 -2538,61 +2497,64 @@@ wher
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
 -              let result: Result<(), _> = loop {
 -                      {
 -                              let per_peer_state = self.per_peer_state.read().unwrap();
 +              loop {
 +                      let per_peer_state = self.per_peer_state.read().unwrap();
  
 -                              let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 -                                      .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 +                      let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 +                              .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
  
 -                              let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 -                              let peer_state = &mut *peer_state_lock;
 +                      let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 +                      let peer_state = &mut *peer_state_lock;
  
 -                              match peer_state.channel_by_id.entry(channel_id.clone()) {
 -                                      hash_map::Entry::Occupied(mut chan_phase_entry) => {
 -                                              if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 -                                                      let funding_txo_opt = chan.context.get_funding_txo();
 -                                                      let their_features = &peer_state.latest_features;
 -                                                      let (shutdown_msg, mut monitor_update_opt, htlcs) =
 -                                                              chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 -                                                      failed_htlcs = htlcs;
 +                      match peer_state.channel_by_id.entry(channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan_phase_entry) => {
 +                                      if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 +                                              let funding_txo_opt = chan.context.get_funding_txo();
 +                                              let their_features = &peer_state.latest_features;
 +                                              let (shutdown_msg, mut monitor_update_opt, htlcs) =
 +                                                      chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 +                                              failed_htlcs = htlcs;
 +
 +                                              // We can send the `shutdown` message before updating the `ChannelMonitor`
 +                                              // here as we don't need the monitor update to complete until we send a
 +                                              // `shutdown_signed`, which we'll delay if we're pending a monitor update.
 +                                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                                      node_id: *counterparty_node_id,
 +                                                      msg: shutdown_msg,
 +                                              });
  
 -                                                      // We can send the `shutdown` message before updating the `ChannelMonitor`
 -                                                      // here as we don't need the monitor update to complete until we send a
 -                                                      // `shutdown_signed`, which we'll delay if we're pending a monitor update.
 -                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 -                                                              node_id: *counterparty_node_id,
 -                                                              msg: shutdown_msg,
 -                                                      });
 +                                              debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
 +                                                      "We can't both complete shutdown and generate a monitor update");
  
 -                                                      // Update the monitor with the shutdown script if necessary.
 -                                                      if let Some(monitor_update) = monitor_update_opt.take() {
 -                                                              break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 -                                                                      peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
 -                                                      }
 +                                              // Update the monitor with the shutdown script if necessary.
 +                                              if let Some(monitor_update) = monitor_update_opt.take() {
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
 +                                                      break;
 +                                              }
  
 -                                                      if chan.is_shutdown() {
 -                                                              if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                                      if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                              peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                                                      msg: channel_update
 -                                                                              });
 -                                                                      }
 -                                                                      self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 +                                              if chan.is_shutdown() {
 +                                                      if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
 +                                                              if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
 +                                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                              msg: channel_update
 +                                                                      });
                                                                }
 +                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                        }
 -                                                      break Ok(());
                                                }
 -                                      },
 -                                      hash_map::Entry::Vacant(_) => (),
 -                              }
 +                                              break;
 +                                      }
 +                              },
 +                              hash_map::Entry::Vacant(_) => {
 +                                      // If we reach this point, it means that the channel_id either refers to an unfunded channel or
 +                                      // it does not exist for this peer. Either way, we can attempt to force-close it.
 +                                      //
 +                                      // An appropriate error will be returned for non-existence of the channel if that's the case.
 +                                      return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 +                              },
                        }
 -                      // If we reach this point, it means that the channel_id either refers to an unfunded channel or
 -                      // it does not exist for this peer. Either way, we can attempt to force-close it.
 -                      //
 -                      // An appropriate error will be returned for non-existence of the channel if that's the case.
 -                      return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 -              };
 +              }
  
                for htlc_source in failed_htlcs.drain(..) {
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
  
 -              let _ = handle_error!(self, result, *counterparty_node_id);
                Ok(())
        }
  
                self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
        }
  
 -      #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
 +              debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 +              #[cfg(debug_assertions)]
 +              for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 +                      debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 +              }
 +
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                let peer_state_mutex = per_peer_state.get(peer_node_id)
                        .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
                let (update_opt, counterparty_node_id) = {
 -                      let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 -                      let peer_state = &mut *peer_state_lock;
 +                      let mut peer_state = peer_state_mutex.lock().unwrap();
                        let closure_reason = if let Some(peer_msg) = peer_msg {
                                ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
                        } else {
                                log_error!(self.logger, "Force-closing channel {}", channel_id);
                                self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
                                let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
 +                              mem::drop(peer_state);
 +                              mem::drop(per_peer_state);
                                match chan_phase {
                                        ChannelPhase::Funded(mut chan) => {
                                                self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
                        }
                };
                if let Some(update) = update_opt {
 -                      let mut peer_state = peer_state_mutex.lock().unwrap();
 -                      peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                              msg: update
 -                      });
 +                      // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
 +                      // not try to broadcast it via whatever peer we have.
 +                      let per_peer_state = self.per_peer_state.read().unwrap();
 +                      let a_peer_state_opt = per_peer_state.get(peer_node_id)
 +                              .ok_or(per_peer_state.values().next());
 +                      if let Ok(a_peer_state_mutex) = a_peer_state_opt {
 +                              let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
 +                              a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                      msg: update
 +                              });
 +                      }
                }
  
                Ok(counterparty_node_id)
                                                        }, onion_packet, None, &self.fee_estimator, &self.logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
 -                                                              match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
 -                                                                      Err(e) => break Err(e),
 -                                                                      Ok(false) => {
 +                                                              match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 +                                                                      false => {
                                                                                // Note that MonitorUpdateInProgress here indicates (per function
                                                                                // docs) that we will resend the commitment update once monitor
                                                                                // updating completes. Therefore, we must return an error
                                                                                // MonitorUpdateInProgress, below.
                                                                                return Err(APIError::MonitorUpdateInProgress);
                                                                        },
 -                                                                      Ok(true) => {},
 +                                                                      true => {},
                                                                }
                                                        },
                                                        None => {},
                let payment_params =
                        PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
  
-               let route_params = RouteParameters { payment_params, final_value_msat: amount_msat };
+               let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
  
                self.send_preflight_probes(route_params, liquidity_limit_multiplier)
        }
                                },
                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
                                        let mut updated_chan = false;
 -                                      let res = {
 +                                      {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
 -                                                                      updated_chan = true;
 -                                                                      handle_new_monitor_update!(self, funding_txo, update.clone(),
 -                                                                              peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
 +                                                                      if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
 +                                                                              updated_chan = true;
 +                                                                              handle_new_monitor_update!(self, funding_txo, update.clone(),
 +                                                                                      peer_state_lock, peer_state, per_peer_state, chan);
 +                                                                      } else {
 +                                                                              debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 +                                                                      }
                                                                },
 -                                                              hash_map::Entry::Vacant(_) => Ok(()),
 +                                                              hash_map::Entry::Vacant(_) => {},
                                                        }
 -                                              } else { Ok(()) }
 -                                      };
 +                                              }
 +                                      }
                                        if !updated_chan {
                                                // TODO: Track this as in-flight even though the channel is closed.
                                                let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                        }
 -                                      // TODO: If this channel has since closed, we're likely providing a payment
 -                                      // preimage update, which we must ensure is durable! We currently don't,
 -                                      // however, ensure that.
 -                                      if res.is_err() {
 -                                              log_error!(self.logger,
 -                                                      "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
 -                                      }
 -                                      let _ = handle_error!(self, res, counterparty_node_id);
                                },
                                BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
                        let mut timed_out_mpp_htlcs = Vec::new();
                        let mut pending_peers_awaiting_removal = Vec::new();
 +                      let mut shutdown_channels = Vec::new();
  
 -                      let process_unfunded_channel_tick = |
 +                      let mut process_unfunded_channel_tick = |
                                chan_id: &ChannelId,
                                context: &mut ChannelContext<SP>,
                                unfunded_context: &mut UnfundedChannelContext,
                                                "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
                                        update_maps_on_chan_removal!(self, &context);
                                        self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
 -                                      self.finish_force_close_channel(context.force_shutdown(false));
 +                                      shutdown_channels.push(context.force_shutdown(false));
                                        pending_msg_events.push(MessageSendEvent::HandleError {
                                                node_id: counterparty_node_id,
                                                action: msgs::ErrorAction::SendErrorMessage {
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
  
 +                      for shutdown_res in shutdown_channels {
 +                              self.finish_force_close_channel(shutdown_res);
 +                      }
 +
                        self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
  
                        // Technically we don't need to do this here, but if we have holding cell entries in a
                // This ensures that future code doesn't introduce a lock-order requirement for
                // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
                // this function with any `per_peer_state` peer lock acquired would.
 +              #[cfg(debug_assertions)]
                for (_, peer) in self.per_peer_state.read().unwrap().iter() {
                        debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
                }
                                                                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                        }
                                                        if !during_init {
 -                                                              let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 -                                                                      peer_state, per_peer_state, chan_phase_entry);
 -                                                              if let Err(e) = res {
 -                                                                      // TODO: This is a *critical* error - we probably updated the outbound edge
 -                                                                      // of the HTLC's monitor with a preimage. We should retry this monitor
 -                                                                      // update over and over again until morale improves.
 -                                                                      log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
 -                                                                      return Err((counterparty_node_id, e));
 -                                                              }
 +                                                              handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 +                                                                      peer_state, per_peer_state, chan);
                                                        } else {
                                                                // If we're running during init we cannot update a monitor directly -
                                                                // they probably haven't actually been loaded yet. Instead, push the
                                Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
 -                              match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
 +                              let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
 +                              match id_to_peer_lock.entry(chan.context.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
                                                        funding_msg.channel_id))
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
 -                                              i_e.insert(chan.context.get_counterparty_node_id());
 -                                      }
 -                              }
 -
 -                              // There's no problem signing a counterparty's funding transaction if our monitor
 -                              // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 -                              // accepted payment from yet. We do, however, need to wait to send our channel_ready
 -                              // until we have persisted our monitor.
 -                              let new_channel_id = funding_msg.channel_id;
 -                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 -                                      node_id: counterparty_node_id.clone(),
 -                                      msg: funding_msg,
 -                              });
 +                                              let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 +                                              if let Ok(persist_state) = monitor_res {
 +                                                      i_e.insert(chan.context.get_counterparty_node_id());
 +                                                      mem::drop(id_to_peer_lock);
 +
 +                                                      // There's no problem signing a counterparty's funding transaction if our monitor
 +                                                      // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 +                                                      // accepted payment from yet. We do, however, need to wait to send our channel_ready
 +                                                      // until we have persisted our monitor.
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 +                                                              node_id: counterparty_node_id.clone(),
 +                                                              msg: funding_msg,
 +                                                      });
  
 -                              let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 -
 -                              if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 -                                      let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
 -                                              per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 -                                              { peer_state.channel_by_id.remove(&new_channel_id) });
 -
 -                                      // Note that we reply with the new channel_id in error messages if we gave up on the
 -                                      // channel, not the temporary_channel_id. This is compatible with ourselves, but the
 -                                      // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
 -                                      // any messages referencing a previously-closed channel anyway.
 -                                      // We do not propagate the monitor update to the user as it would be for a monitor
 -                                      // that we didn't manage to store (and that we don't care about - we don't respond
 -                                      // with the funding_signed so the channel can never go on chain).
 -                                      if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 -                                              res.0 = None;
 +                                                      if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 +                                                              handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 +                                                                      per_peer_state, chan, INITIAL_MONITOR);
 +                                                      } else {
 +                                                              unreachable!("This must be a funded channel as we just inserted it.");
 +                                                      }
 +                                                      Ok(())
 +                                              } else {
 +                                                      log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
 +                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                                                              "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
 +                                                              funding_msg.channel_id));
 +                                              }
                                        }
 -                                      res.map(|_| ())
 -                              } else {
 -                                      unreachable!("This must be a funded channel as we just inserted it.");
                                }
                        }
                }
                                        ChannelPhase::Funded(ref mut chan) => {
                                                let monitor = try_chan_phase_entry!(self,
                                                        chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 -                                              let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
 -                                              let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
 -                                              if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 -                                                      // We weren't able to watch the channel to begin with, so no updates should be made on
 -                                                      // it. Previously, full_stack_target found an (unreachable) panic when the
 -                                                      // monitor update contained within `shutdown_finish` was applied.
 -                                                      if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
 -                                                              shutdown_finish.0.take();
 -                                                      }
 +                                              if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
 +                                                      handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 +                                                      Ok(())
 +                                              } else {
 +                                                      try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
                                                }
 -                                              res.map(|_| ())
                                        },
                                        _ => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
        }
  
        fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
 -              let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
 -              let result: Result<(), _> = loop {
 +              let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
 +              let mut finish_shutdown = None;
 +              {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                }
                                                // Update the monitor with the shutdown script if necessary.
                                                if let Some(monitor_update) = monitor_update_opt {
 -                                                      break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
                                                }
 -                                              break Ok(());
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
                                                log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
                                                self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
                                                let mut chan = remove_channel_phase!(self, chan_phase_entry);
 -                                              self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
 -                                              return Ok(());
 +                                              finish_shutdown = Some(chan.context_mut().force_shutdown(false));
                                        },
                                }
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
 -              };
 +              }
                for htlc_source in dropped_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
 +              if let Some(shutdown_res) = finish_shutdown {
 +                      self.finish_force_close_channel(shutdown_res);
 +              }
  
 -              result
 +              Ok(())
        }
  
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
                                        let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
 -                                                      peer_state, per_peer_state, chan_phase_entry).map(|_| ())
 -                                      } else { Ok(()) }
 +                                                      peer_state, per_peer_state, chan);
 +                                      }
 +                                      Ok(())
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
        }
  
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 -              let (htlcs_to_fail, res) = {
 +              let htlcs_to_fail = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
                                                        chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
 -                                              let res = if let Some(monitor_update) = monitor_update_opt {
 +                                              if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
                                                        handle_new_monitor_update!(self, funding_txo, monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
 -                                              } else { Ok(()) };
 -                                              (htlcs_to_fail, res)
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
 +                                              }
 +                                              htlcs_to_fail
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                        "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
 -              res
 +              Ok(())
        }
  
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
 -                                      MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
 -                                      MonitorEvent::UpdateFailed(funding_outpoint) => {
 +                                      MonitorEvent::HolderForceClosed(funding_outpoint) => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                                                msg: update
                                                                                        });
                                                                                }
 -                                                                              let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
 -                                                                                      ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
 -                                                                              } else {
 -                                                                                      ClosureReason::CommitmentTxConfirmed
 -                                                                              };
 -                                                                              self.issue_channel_close_events(&chan.context, reason);
 +                                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::SendErrorMessage {
        fn check_free_holding_cells(&self) -> bool {
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
 -              let mut handle_errors = Vec::new();
  
                // Walk our list of channels and find any that need to update. Note that when we do find an
                // update, if it includes actions that must be taken afterwards, we have to drop the
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
  
 -                                                      let channel_id: ChannelId = *channel_id;
 -                                                      let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
 -                                                              peer_state.channel_by_id.remove(&channel_id));
 -                                                      if res.is_err() {
 -                                                              handle_errors.push((counterparty_node_id, res));
 -                                                      }
 +                                                      handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
                                                        continue 'peer_loop;
                                                }
                                        }
                        break 'peer_loop;
                }
  
 -              let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
 +              let has_update = has_monitor_update || !failed_htlcs.is_empty();
                for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
                        self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
                }
  
 -              for (counterparty_node_id, err) in handle_errors.drain(..) {
 -                      let _ = handle_error!(self, err, counterparty_node_id);
 -              }
 -
                has_update
        }
  
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 -              let mut errors = Vec::new();
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_funding_outpoint.to_channel_id());
 -                                                      if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
 -                                                              peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
 -                                                      {
 -                                                              errors.push((e, counterparty_node_id));
 -                                                      }
 +                                                      handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
 +                                                              peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
                                                                // If there are more `ChannelMonitorUpdate`s to process, restart at the
                                                                // top of the loop.
                        }
                        break;
                }
 -              for (err, counterparty_node_id) in errors {
 -                      let res = Err::<(), _>(err);
 -                      let _ = handle_error!(self, res, counterparty_node_id);
 -              }
        }
  
        fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
@@@ -9496,6 -9559,7 +9496,7 @@@ wher
                                                                                pending_fee_msat: Some(path_fee),
                                                                                total_msat: path_amt,
                                                                                starting_block_height: best_block_height,
+                                                                               remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup
                                                                        });
                                                                        log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
                                                                                path_amt, &htlc.payment_hash,  log_bytes!(session_priv_bytes));
@@@ -10127,7 -10191,7 +10128,7 @@@ mod tests 
                        TEST_FINAL_CLTV, false), 100_000);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
 -                      None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 +                      None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
 -                      None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 +                      None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
                );
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
 -                      None, nodes[0].logger, &scorer, &(), &random_seed_bytes
 +                      None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_id_2 = PaymentId([45; 32]);
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
 -                      nodes[0].logger, &scorer, &(), &random_seed_bytes
 +                      nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
  
                let test_preimage = PaymentPreimage([42; 32]);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
 -                      nodes[0].logger, &scorer, &(), &random_seed_bytes
 +                      nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
  
                let test_preimage = PaymentPreimage([42; 32]);
index 82fa06f5c80b8a5494b9b546d4e5ccb0d4287999,8f27cf1ad53979344e98e8a9d5eede2c5f338d49..0e19ceb65832e47db0e3fb7d5165361acf7282b1
@@@ -422,10 -422,6 +422,10 @@@ pub struct Node<'chan_man, 'node_cfg: '
                &'chan_mon_cfg test_utils::TestLogger,
        >,
  }
 +#[cfg(feature = "std")]
 +impl<'a, 'b, 'c> std::panic::UnwindSafe for Node<'a, 'b, 'c> {}
 +#[cfg(feature = "std")]
 +impl<'a, 'b, 'c> std::panic::RefUnwindSafe for Node<'a, 'b, 'c> {}
  impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
        pub fn best_block_hash(&self) -> BlockHash {
                self.blocks.lock().unwrap().last().unwrap().0.block_hash()
@@@ -582,7 -578,7 +582,7 @@@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
 -                              if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != ChannelMonitorUpdateStatus::Completed {
 +                              if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
                                        panic!();
                                }
                        }
@@@ -981,7 -977,7 +981,7 @@@ pub fn _reload_node<'a, 'b, 'c>(node: &
  
        for monitor in monitors_read.drain(..) {
                assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
 -                      ChannelMonitorUpdateStatus::Completed);
 +                      Ok(ChannelMonitorUpdateStatus::Completed));
                check_added_monitors!(node, 1);
        }
  
@@@ -1858,7 -1854,7 +1858,7 @@@ pub fn get_route(send_node: &Node, rout
        router::get_route(
                &send_node.node.get_our_node_id(), route_params, &send_node.network_graph.read_only(),
                Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
 -              send_node.logger, &scorer, &(), &random_seed_bytes
 +              send_node.logger, &scorer, &Default::default(), &random_seed_bytes
        )
  }
  
@@@ -1882,7 -1878,11 +1882,11 @@@ macro_rules! get_route_and_payment_has
                $crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value)
        }};
        ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr) => {{
-               let route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
+               $crate::get_route_and_payment_hash!($send_node, $recv_node, $payment_params, $recv_value, None)
+       }};
+       ($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr, $max_total_routing_fee_msat: expr) => {{
+               let mut route_params = $crate::routing::router::RouteParameters::from_payment_params_and_value($payment_params, $recv_value);
+               route_params.max_total_routing_fee_msat = $max_total_routing_fee_msat;
                let (payment_preimage, payment_hash, payment_secret) =
                        $crate::ln::functional_test_utils::get_payment_preimage_hash(&$recv_node, Some($recv_value), None);
                let route = $crate::ln::functional_test_utils::get_route(&$send_node, &route_params);
@@@ -2510,7 -2510,7 +2514,7 @@@ pub fn route_over_limit<'a, 'b, 'c>(ori
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        let route = router::get_route(&origin_node.node.get_our_node_id(), &route_params, &network_graph,
 -              None, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
 +              None, origin_node.logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
index 86c31aac6c606aad848a0bc1f12e283e09334c73,5859cbe54aa32e1850607b32218fc03bedab6dd8..cdf5627a7aac6013371cd2b55e3cf14224a8423a
@@@ -54,10 -54,14 +54,14 @@@ pub(crate) enum PendingOutboundPayment 
        AwaitingInvoice {
                timer_ticks_without_response: u8,
                retry_strategy: Retry,
+               max_total_routing_fee_msat: Option<u64>,
        },
        InvoiceReceived {
                payment_hash: PaymentHash,
                retry_strategy: Retry,
+               // Note this field is currently just replicated from AwaitingInvoice but not actually
+               // used anywhere.
+               max_total_routing_fee_msat: Option<u64>,
        },
        Retryable {
                retry_strategy: Option<Retry>,
@@@ -76,6 -80,7 +80,7 @@@
                total_msat: u64,
                /// Our best known block height at the time this payment was initiated.
                starting_block_height: u32,
+               remaining_max_total_routing_fee_msat: Option<u64>,
        },
        /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
        /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart
@@@ -210,11 -215,19 +215,19 @@@ impl PendingOutboundPayment 
                                PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
                };
                if remove_res {
-                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
-                               let path = path.expect("Fulfilling a payment should always come with a path");
+                       if let PendingOutboundPayment::Retryable {
+                               ref mut pending_amt_msat, ref mut pending_fee_msat,
+                               ref mut remaining_max_total_routing_fee_msat, ..
+                       } = self {
+                               let path = path.expect("Removing a failed payment should always come with a path");
                                *pending_amt_msat -= path.final_value_msat();
+                               let path_fee_msat = path.fee_msat();
                                if let Some(fee_msat) = pending_fee_msat.as_mut() {
-                                       *fee_msat -= path.fee_msat();
+                                       *fee_msat -= path_fee_msat;
+                               }
+                               if let Some(max_total_routing_fee_msat) = remaining_max_total_routing_fee_msat.as_mut() {
+                                       *max_total_routing_fee_msat = max_total_routing_fee_msat.saturating_add(path_fee_msat);
                                }
                        }
                }
                        PendingOutboundPayment::Abandoned { .. } => false,
                };
                if insert_res {
-                       if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
-                               *pending_amt_msat += path.final_value_msat();
-                               if let Some(fee_msat) = pending_fee_msat.as_mut() {
-                                       *fee_msat += path.fee_msat();
-                               }
+                       if let PendingOutboundPayment::Retryable {
+                               ref mut pending_amt_msat, ref mut pending_fee_msat,
+                               ref mut remaining_max_total_routing_fee_msat, .. 
+                       } = self {
+                                       *pending_amt_msat += path.final_value_msat();
+                                       let path_fee_msat = path.fee_msat();
+                                       if let Some(fee_msat) = pending_fee_msat.as_mut() {
+                                               *fee_msat += path_fee_msat;
+                                       }
+                                       if let Some(max_total_routing_fee_msat) = remaining_max_total_routing_fee_msat.as_mut() {
+                                               *max_total_routing_fee_msat = max_total_routing_fee_msat.saturating_sub(path_fee_msat);
+                                       }
                        }
                }
                insert_res
@@@ -731,12 -752,15 +752,15 @@@ impl OutboundPayments 
                SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
        {
                let payment_hash = invoice.payment_hash();
+               let mut max_total_routing_fee_msat = None;
                match self.pending_outbound_payments.lock().unwrap().entry(payment_id) {
                        hash_map::Entry::Occupied(entry) => match entry.get() {
-                               PendingOutboundPayment::AwaitingInvoice { retry_strategy, .. } => {
+                               PendingOutboundPayment::AwaitingInvoice { retry_strategy, max_total_routing_fee_msat: max_total_fee, .. } => {
+                                       max_total_routing_fee_msat = *max_total_fee;
                                        *entry.into_mut() = PendingOutboundPayment::InvoiceReceived {
                                                payment_hash,
                                                retry_strategy: *retry_strategy,
+                                               max_total_routing_fee_msat,
                                        };
                                },
                                _ => return Err(Bolt12PaymentError::DuplicateInvoice),
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
                        final_value_msat: invoice.amount_msats(),
+                       max_total_routing_fee_msat,
                };
  
                self.find_route_and_send_payment(
                        let mut retry_id_route_params = None;
                        for (pmt_id, pmt) in outbounds.iter_mut() {
                                if pmt.is_auto_retryable_now() {
-                                       if let PendingOutboundPayment::Retryable { pending_amt_msat, total_msat, payment_params: Some(params), payment_hash, .. } = pmt {
+                                       if let PendingOutboundPayment::Retryable { pending_amt_msat, total_msat, payment_params: Some(params), payment_hash, remaining_max_total_routing_fee_msat, .. } = pmt {
                                                if pending_amt_msat < total_msat {
                                                        retry_id_route_params = Some((*payment_hash, *pmt_id, RouteParameters {
                                                                final_value_msat: *total_msat - *pending_amt_msat,
                                                                payment_params: params.clone(),
+                                                               max_total_routing_fee_msat: *remaining_max_total_routing_fee_msat,
                                                        }));
                                                        break
                                                }
                                                        log_error!(logger, "Payment not yet sent");
                                                        return
                                                },
-                                               PendingOutboundPayment::InvoiceReceived { payment_hash, retry_strategy } => {
+                                               PendingOutboundPayment::InvoiceReceived { payment_hash, retry_strategy, .. } => {
                                                        let total_amount = route_params.final_value_msat;
                                                        let recipient_onion = RecipientOnionFields {
                                                                payment_secret: None,
                        custom_tlvs: recipient_onion.custom_tlvs,
                        starting_block_height: best_block_height,
                        total_msat: route.get_total_amount(),
+                       remaining_max_total_routing_fee_msat:
+                               route.route_params.as_ref().and_then(|p| p.max_total_routing_fee_msat),
                };
  
                for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
  
        #[allow(unused)]
        pub(super) fn add_new_awaiting_invoice(
-               &self, payment_id: PaymentId, retry_strategy: Retry
+               &self, payment_id: PaymentId, retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
        ) -> Result<(), ()> {
                let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
                match pending_outbounds.entry(payment_id) {
                                entry.insert(PendingOutboundPayment::AwaitingInvoice {
                                        timer_ticks_without_response: 0,
                                        retry_strategy,
+                                       max_total_routing_fee_msat,
                                });
  
                                Ok(())
                let mut has_ok = false;
                let mut has_err = false;
                let mut pending_amt_unsent = 0;
+               let mut total_ok_fees_msat = 0;
                for (res, path) in results.iter().zip(route.paths.iter()) {
-                       if res.is_ok() { has_ok = true; }
+                       if res.is_ok() {
+                               has_ok = true;
+                               total_ok_fees_msat += path.fee_msat();
+                       }
                        if res.is_err() { has_err = true; }
                        if let &Err(APIError::MonitorUpdateInProgress) = res {
                                // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
                                // PartialFailure.
                                has_err = true;
                                has_ok = true;
+                               total_ok_fees_msat += path.fee_msat();
                        } else if res.is_err() {
                                pending_amt_unsent += path.final_value_msat();
                        }
                                results,
                                payment_id,
                                failed_paths_retry: if pending_amt_unsent != 0 {
-                                       if let Some(payment_params) = route.route_params.as_ref().map(|p| p.payment_params.clone()) {
-                                               Some(RouteParameters {
-                                                       payment_params: payment_params,
-                                                       final_value_msat: pending_amt_unsent,
-                                               })
+                                       if let Some(route_params) = &route.route_params {
+                                               let mut route_params = route_params.clone();
+                                               // We calculate the leftover fee budget we're allowed to spend by
+                                               // subtracting the used fee from the total fee budget.
+                                               route_params.max_total_routing_fee_msat = route_params
+                                                       .max_total_routing_fee_msat.map(|m| m.saturating_sub(total_ok_fees_msat));
+                                               route_params.final_value_msat = pending_amt_unsent;
+                                               Some(route_params)
                                        } else { None }
                                } else { None },
                        })
        ) -> bool where L::Target: Logger {
                #[cfg(test)]
                let DecodedOnionFailure {
 -                      network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data
 +                      network_update, short_channel_id, payment_failed_permanently, onion_error_code,
 +                      onion_error_data
                } = onion_error.decode_onion_failure(secp_ctx, logger, &source);
                #[cfg(not(test))]
 -              let DecodedOnionFailure { network_update, short_channel_id, payment_retryable } =
 +              let DecodedOnionFailure { network_update, short_channel_id, payment_failed_permanently } =
                        onion_error.decode_onion_failure(secp_ctx, logger, &source);
  
                let payment_is_probe = payment_is_probe(payment_hash, &payment_id, probing_cookie_secret);
                                payment.get_mut().insert_previously_failed_scid(scid);
                        }
  
 -                      if payment_is_probe || !is_retryable_now || !payment_retryable {
 -                              let reason = if !payment_retryable {
 +                      if payment_is_probe || !is_retryable_now || payment_failed_permanently {
 +                              let reason = if payment_failed_permanently {
                                        PaymentFailureReason::RecipientRejected
                                } else {
                                        PaymentFailureReason::RetriesExhausted
                                is_retryable_now = false;
                        }
                        if payment.get().remaining_parts() == 0 {
-                               if let PendingOutboundPayment::Abandoned { payment_hash, reason, .. }= payment.get() {
+                               if let PendingOutboundPayment::Abandoned { payment_hash, reason, .. } = payment.get() {
                                        if !payment_is_probe {
                                                full_failure_ev = Some(events::Event::PaymentFailed {
                                                        payment_id: *payment_id,
  
                let path_failure = {
                        if payment_is_probe {
 -                              if !payment_retryable {
 +                              if payment_failed_permanently {
                                        events::Event::ProbeSuccessful {
                                                payment_id: *payment_id,
                                                payment_hash: payment_hash.clone(),
                                events::Event::PaymentPathFailed {
                                        payment_id: Some(*payment_id),
                                        payment_hash: payment_hash.clone(),
 -                                      payment_failed_permanently: !payment_retryable,
 +                                      payment_failed_permanently,
                                        failure: events::PathFailure::OnPath { network_update },
                                        path: path.clone(),
                                        short_channel_id,
@@@ -1690,6 -1727,7 +1728,7 @@@ impl_writeable_tlv_based_enum_upgradabl
                (8, pending_amt_msat, required),
                (9, custom_tlvs, optional_vec),
                (10, starting_block_height, required),
+               (11, remaining_max_total_routing_fee_msat, option),
                (not_written, retry_strategy, (static_value, None)),
                (not_written, attempts, (static_value, PaymentAttempts::new())),
        },
        (5, AwaitingInvoice) => {
                (0, timer_ticks_without_response, required),
                (2, retry_strategy, required),
+               (4, max_total_routing_fee_msat, option),
        },
        (7, InvoiceReceived) => {
                (0, payment_hash, required),
                (2, retry_strategy, required),
+               (4, max_total_routing_fee_msat, option),
        },
  );
  
@@@ -1927,7 -1967,9 +1968,9 @@@ mod tests 
                let payment_id = PaymentId([0; 32]);
  
                assert!(!outbound_payments.has_pending_payments());
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                for _ in 0..INVOICE_REQUEST_TIMEOUT_TICKS {
                );
                assert!(pending_events.lock().unwrap().is_empty());
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_err());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None)
+                               .is_err()
+               );
        }
  
        #[test]
                let payment_id = PaymentId([0; 32]);
  
                assert!(!outbound_payments.has_pending_payments());
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                outbound_payments.abandon_payment(
                let outbound_payments = OutboundPayments::new();
                let payment_id = PaymentId([0; 32]);
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                let created_at = now() - DEFAULT_RELATIVE_EXPIRY;
                let outbound_payments = OutboundPayments::new();
                let payment_id = PaymentId([0; 32]);
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .sign(recipient_sign).unwrap();
  
                router.expect_find_route(
-                       RouteParameters {
-                               payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
-                               final_value_msat: invoice.amount_msats(),
-                       },
+                       RouteParameters::from_payment_params_and_value(
+                               PaymentParameters::from_bolt12_invoice(&invoice),
+                               invoice.amount_msats(),
+                       ),
                        Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }),
                );
  
                let outbound_payments = OutboundPayments::new();
                let payment_id = PaymentId([0; 32]);
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), None).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
                        .build().unwrap()
                        .sign(recipient_sign).unwrap();
  
-               let route_params = RouteParameters {
-                       payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
-                       final_value_msat: invoice.amount_msats(),
-               };
+               let route_params = RouteParameters::from_payment_params_and_value(
+                       PaymentParameters::from_bolt12_invoice(&invoice),
+                       invoice.amount_msats(),
+               );
                router.expect_find_route(
                        route_params.clone(), Ok(Route { paths: vec![], route_params: Some(route_params) })
                );
                let route_params = RouteParameters {
                        payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
                        final_value_msat: invoice.amount_msats(),
+                       max_total_routing_fee_msat: Some(1234),
                };
                router.expect_find_route(
                        route_params.clone(),
                assert!(!outbound_payments.has_pending_payments());
                assert!(pending_events.lock().unwrap().is_empty());
  
-               assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+               assert!(
+                       outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0), Some(1234)).is_ok()
+               );
                assert!(outbound_payments.has_pending_payments());
  
                assert_eq!(
index 19f55f4f2d30ea64b62db1bca27263e8ff70f186,94c3bf668f7da7ed0ca4b99f2f4d7c287e95ca0d..21c4881ab1a08295cf7989ea99d3a47561f9656f
@@@ -83,7 -83,11 +83,11 @@@ fn mpp_retry() 
        send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
  
        let amt_msat = 1_000_000;
-       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
+       let max_total_routing_fee_msat = 50_000;
+       let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(
+               nodes[0], nodes[3], payment_params, amt_msat, Some(max_total_routing_fee_msat));
        let path = route.paths[0].clone();
        route.paths.push(path);
        route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
        route.paths.remove(0);
        route_params.final_value_msat = 1_000_000;
        route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
+       // Check that the remaining max total routing fee for the second attempt is 50_000 msat minus
+       // the 1_000 msat fee used by the first path.
+       route_params.max_total_routing_fee_msat = Some(max_total_routing_fee_msat - 1_000);
        nodes[0].router.expect_find_route(route_params, Ok(route));
        nodes[0].node.process_pending_htlc_forwards();
        check_added_monitors!(nodes[0], 1);
        claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
  }
  
+ #[test]
+ fn mpp_retry_overpay() {
+       // We create an MPP scenario with two paths in which we need to overpay to reach
+       // htlc_minimum_msat. We then fail the overpaid path and check that on retry our
+       // max_total_routing_fee_msat only accounts for the path's fees, but not for the fees overpaid
+       // in the first attempt.
+       let chanmon_cfgs = create_chanmon_cfgs(4);
+       let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+       let mut user_config = test_default_channel_config();
+       user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+       // Raise htlc_minimum_msat on the two middle nodes so the sender is forced to overpay one part.
+       let mut limited_config_1 = user_config.clone();
+       limited_config_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000;
+       let mut limited_config_2 = user_config.clone();
+       limited_config_2.channel_handshake_config.our_htlc_minimum_msat = 34_500_000;
+       let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs,
+               &[Some(user_config), Some(limited_config_1), Some(limited_config_2), Some(user_config)]);
+       let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+       // Open a diamond of channels (0-1, 0-2, 1-3, 3-2) so two MPP paths exist from node 0 to node 3.
+       let (chan_1_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0);
+       let (chan_2_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0);
+       let (_chan_3_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0);
+       let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0);
+       let amt_msat = 70_000_000;
+       let max_total_routing_fee_msat = Some(1_000_000);
+       let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+               .with_bolt11_features(nodes[3].node.invoice_features()).unwrap();
+       let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(
+               nodes[0], nodes[3], payment_params, amt_msat, max_total_routing_fee_msat);
+       // Check we overpay on the second path which we're about to fail.
+       assert_eq!(chan_1_update.contents.fee_proportional_millionths, 0);
+       let overpaid_amount_1 = route.paths[0].fee_msat() as u32 - chan_1_update.contents.fee_base_msat;
+       assert_eq!(overpaid_amount_1, 0);
+       assert_eq!(chan_2_update.contents.fee_proportional_millionths, 0);
+       let overpaid_amount_2 = route.paths[1].fee_msat() as u32 - chan_2_update.contents.fee_base_msat;
+       let total_overpaid_amount = overpaid_amount_1 + overpaid_amount_2;
+       // Initiate the payment.
+       let payment_id = PaymentId(payment_hash.0);
+       let mut route_params = route.route_params.clone().unwrap();
+       nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+       nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+               payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
+       check_added_monitors!(nodes[0], 2); // one monitor per path
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 2);
+       // Pass half of the payment along the success path.
+       let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+       pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, payment_hash,
+               Some(payment_secret), success_path_msgs, false, None);
+       // Add the HTLC along the first hop.
+       let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+       let (update_add, commitment_signed) = match fail_path_msgs_1 {
+               MessageSendEvent::UpdateHTLCs {
+                       node_id: _,
+                       updates: msgs::CommitmentUpdate {
+                                       ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs,
+                                       ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed
+                       }
+               } => {
+                       assert_eq!(update_add_htlcs.len(), 1);
+                       assert!(update_fail_htlcs.is_empty());
+                       assert!(update_fulfill_htlcs.is_empty());
+                       assert!(update_fail_malformed_htlcs.is_empty());
+                       assert!(update_fee.is_none());
+                       (update_add_htlcs[0].clone(), commitment_signed.clone())
+               },
+               _ => panic!("Unexpected event"),
+       };
+       nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
+       commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);
+       // Attempt to forward the payment and complete the 2nd path's failure.
+       expect_pending_htlcs_forwardable!(&nodes[2]);
+       expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2],
+               vec![HTLCDestination::NextHopChannel {
+                       node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id
+               }]
+       );
+       let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+       assert!(htlc_updates.update_add_htlcs.is_empty());
+       assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+       assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+       assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+       check_added_monitors!(nodes[2], 1);
+       nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(),
+               &htlc_updates.update_fail_htlcs[0]);
+       commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
+       let mut events = nodes[0].node.get_and_clear_pending_events();
+       match events[1] {
+               Event::PendingHTLCsForwardable { .. } => {},
+               _ => panic!("Unexpected event")
+       }
+       events.remove(1);
+       expect_payment_failed_conditions_event(events, payment_hash, false,
+               PaymentFailedConditions::new().mpp_parts_remain());
+       // Rebalance the channel so the second half of the payment can succeed.
+       send_payment(&nodes[3], &vec!(&nodes[2])[..], 38_000_000);
+       // Retry the second half of the payment and make sure it succeeds.
+       let first_path_value = route.paths[0].final_value_msat();
+       assert_eq!(first_path_value, 36_000_000);
+       route.paths.remove(0);
+       route_params.final_value_msat -= first_path_value;
+       route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
+       // Check that the remaining max total routing fee for the second attempt accounts only for the
+       // 1_000 msat base fee, but not for the overpaid value of the first attempt.
+       route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 1000);
+       nodes[0].router.expect_find_route(route_params, Ok(route));
+       nodes[0].node.process_pending_htlc_forwards();
+       check_added_monitors!(nodes[0], 1);
+       let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash,
+               Some(payment_secret), events.pop().unwrap(), true, None);
+       // Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the
+       // individual steps here.
+       let extra_fees = vec![0, total_overpaid_amount];
+       let expected_total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees(
+               &nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], &extra_fees[..], false,
+               payment_preimage);
+       expect_payment_sent!(&nodes[0], payment_preimage, Some(expected_total_fee_msat));
+ }
  fn do_mpp_receive_timeout(send_partial_mpp: bool) {
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
@@@ -267,7 -410,7 +410,7 @@@ fn do_test_keysend_payments(public_node
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route = find_route(
                &payer_pubkey, &route_params, &network_graph, first_hops,
 -              nodes[0].logger, &scorer, &(), &random_seed_bytes
 +              nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
        ).unwrap();
  
        {
@@@ -320,7 -463,7 +463,7 @@@ fn test_mpp_keysend() 
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
        let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger,
 -              &scorer, &(), &random_seed_bytes).unwrap();
 +              &scorer, &Default::default(), &random_seed_bytes).unwrap();
  
        let payment_preimage = PaymentPreimage([42; 32]);
        let payment_secret = PaymentSecret(payment_preimage.0);
@@@ -1106,7 -1249,7 +1249,7 @@@ fn get_ldk_payment_preimage() 
        let route = get_route( &nodes[0].node.get_our_node_id(), &route_params,
                &nodes[0].network_graph.read_only(),
                Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger,
 -              &scorer, &(), &random_seed_bytes).unwrap();
 +              &scorer, &Default::default(), &random_seed_bytes).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
                RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@@ -1337,7 -1480,7 +1480,7 @@@ fn preflight_probes_yield_event_and_ski
        let mut payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_bolt11_features(invoice_features).unwrap();
  
-       let route_params = RouteParameters { payment_params, final_value_msat: 80_000_000 };
+       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 80_000_000);
        let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap();
  
        // We check that only one probe was sent, the other one was skipped due to limited liquidity.
@@@ -1750,9 -1893,9 +1893,9 @@@ fn do_test_intercepted_payment(test: In
                ]).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat,);
 -      let route = get_route( &nodes[0].node.get_our_node_id(), &route_params,
 -              &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &(),
 -              &random_seed_bytes,).unwrap();
 +      let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
 +              &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &Default::default(),
 +              &random_seed_bytes).unwrap();
  
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
@@@ -2258,14 -2401,12 +2401,14 @@@ fn auto_retry_partial_failure() 
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
 +      // Open three channels: the first has plenty of liquidity, while the second and third have ~no
 +      // available liquidity, causing any outbound payments routed over them to fail immediately.
        let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      let chan_3_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 +      let chan_2_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
 +      let chan_3_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
  
        // Marshall data to send the payment
 -      let amt_msat = 20_000;
 +      let amt_msat = 10_000_000;
        let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
        #[cfg(feature = "std")]
        let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
                .with_bolt11_features(invoice_features).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
  
 -      // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
 -      // second (for the initial send path2 over chan_2) fails.
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 -      // Ensure third monitor update (for the retry1's path1 over chan_1) succeeds, but the fourth (for
 -      // the retry1's path2 over chan_3) fails, and monitor updates succeed after that.
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -
        // Configure the initial send, retry1 and retry2's paths.
        let send_route = Route {
                paths: vec![
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
 -      let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(closed_chan_events.len(), 4);
 -      match closed_chan_events[0] {
 -              Event::ChannelClosed { .. } => {},
 -              _ => panic!("Unexpected event"),
 -      }
 -      match closed_chan_events[1] {
 +      let payment_failed_events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(payment_failed_events.len(), 2);
 +      match payment_failed_events[0] {
                Event::PaymentPathFailed { .. } => {},
                _ => panic!("Unexpected event"),
        }
 -      match closed_chan_events[2] {
 -              Event::ChannelClosed { .. } => {},
 -              _ => panic!("Unexpected event"),
 -      }
 -      match closed_chan_events[3] {
 +      match payment_failed_events[1] {
                Event::PaymentPathFailed { .. } => {},
                _ => panic!("Unexpected event"),
        }
  
        // Pass the first part of the payment along the path.
 -      check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
 +      check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out
        let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
  
 -      // First message is the first update_add, remaining messages are broadcasting channel updates and
 -      // errors for the permfailed channels
 -      assert_eq!(msg_events.len(), 5);
 +      // Only one HTLC/channel update actually made it out
 +      assert_eq!(msg_events.len(), 1);
        let mut payment_event = SendEvent::from_event(msg_events.remove(0));
  
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
@@@ -2461,13 -2621,12 +2604,13 @@@ fn auto_retry_zero_attempts_send_error(
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
 -      create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 +      // Open a single channel that does not have sufficient liquidity for the payment we want to
 +      // send.
 +      let chan_id  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
  
        // Marshall data to send the payment
 -      let amt_msat = 20_000;
 -      let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
 +      let amt_msat = 10_000_000;
 +      let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None);
        #[cfg(feature = "std")]
        let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
        #[cfg(not(feature = "std"))]
                .with_bolt11_features(invoice_features).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
  
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 +      // Override the route search to return a route, rather than failing at the route-finding step.
 +      let send_route = Route {
 +              paths: vec![
 +                      Path { hops: vec![RouteHop {
 +                              pubkey: nodes[1].node.get_our_node_id(),
 +                              node_features: nodes[1].node.node_features(),
 +                              short_channel_id: chan_id,
 +                              channel_features: nodes[1].node.channel_features(),
 +                              fee_msat: amt_msat,
 +                              cltv_expiry_delta: 100,
 +                              maybe_announced_channel: true,
 +                      }], blinded_tail: None },
 +              ],
 +              route_params: Some(route_params.clone()),
 +      };
 +      nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
 +
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
 -      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        let events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), 3);
 -      if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
 -      if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
 -      if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
 -      check_added_monitors!(nodes[0], 2);
 +      assert_eq!(events.len(), 2);
 +      if let Event::PaymentPathFailed { .. } = events[0] { } else { panic!(); }
 +      if let Event::PaymentFailed { .. } = events[1] { } else { panic!(); }
 +      check_added_monitors!(nodes[0], 0);
  }
  
  #[test]
@@@ -3127,7 -3271,9 +3270,9 @@@ fn test_threaded_payment_retries() 
        let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
                .with_expiry_time(payment_expiry_secs as u64)
                .with_bolt11_features(invoice_features).unwrap();
-       let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
+       let mut route_params = RouteParameters {
+               payment_params, final_value_msat: amt_msat, max_total_routing_fee_msat: Some(500_000),
+       };
  
        let mut route = Route {
                paths: vec![
                                maybe_announced_channel: true,
                        }], blinded_tail: None }
                ],
-               route_params: Some(RouteParameters::from_payment_params_and_value(
-                       PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV),
-                       amt_msat - amt_msat / 1000)),
+               route_params: Some(RouteParameters {
+                       payment_params: PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV),
+                       final_value_msat: amt_msat - amt_msat / 1000,
+                       max_total_routing_fee_msat: Some(500_000),
+               }),
        };
        nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
  
                let mut new_route_params = route_params.clone();
                previously_failed_channels.push(route.paths[0].hops[1].short_channel_id);
                new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
+               new_route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 100_000);
                route.paths[0].hops[1].short_channel_id += 1;
                nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
  
index 6e5c00e7665a4c04ccab6df38de89bef9ad7e5ba,c680d57a6dc4f05e97d838d038e2e90276f92874..c20ce2e97ee835ca0cf2c77b8c7526c08dba5855
@@@ -409,6 -409,7 +409,7 @@@ impl Writeable for Route 
                        (1, self.route_params.as_ref().map(|p| &p.payment_params), option),
                        (2, blinded_tails, optional_vec),
                        (3, self.route_params.as_ref().map(|p| p.final_value_msat), option),
+                       (5, self.route_params.as_ref().map(|p| p.max_total_routing_fee_msat), option),
                });
                Ok(())
        }
@@@ -436,6 -437,7 +437,7 @@@ impl Readable for Route 
                        (1, payment_params, (option: ReadableArgs, min_final_cltv_expiry_delta)),
                        (2, blinded_tails, optional_vec),
                        (3, final_value_msat, option),
+                       (5, max_total_routing_fee_msat, option)
                });
                let blinded_tails = blinded_tails.unwrap_or(Vec::new());
                if blinded_tails.len() != 0 {
                // If we previously wrote the corresponding fields, reconstruct RouteParameters.
                let route_params = match (payment_params, final_value_msat) {
                        (Some(payment_params), Some(final_value_msat)) => {
-                               Some(RouteParameters { payment_params, final_value_msat })
+                               Some(RouteParameters { payment_params, final_value_msat, max_total_routing_fee_msat })
                        }
                        _ => None,
                };
@@@ -467,12 -469,20 +469,20 @@@ pub struct RouteParameters 
  
        /// The amount in msats sent on the failed payment path.
        pub final_value_msat: u64,
+       /// The maximum total fees, in millisatoshi, that may accrue during route finding.
+       ///
+       /// This limit also applies to the total fees that may arise while retrying failed payment
+       /// paths.
+       ///
+       /// Default value: `None`
+       pub max_total_routing_fee_msat: Option<u64>,
  }
  
  impl RouteParameters {
        /// Constructs [`RouteParameters`] from the given [`PaymentParameters`] and a payment amount.
        pub fn from_payment_params_and_value(payment_params: PaymentParameters, final_value_msat: u64) -> Self {
-               Self { payment_params, final_value_msat }
+               Self { payment_params, final_value_msat, max_total_routing_fee_msat: None }
        }
  }
  
@@@ -480,6 -490,7 +490,7 @@@ impl Writeable for RouteParameters 
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
                write_tlv_fields!(writer, {
                        (0, self.payment_params, required),
+                       (1, self.max_total_routing_fee_msat, option),
                        (2, self.final_value_msat, required),
                        // LDK versions prior to 0.0.114 had the `final_cltv_expiry_delta` parameter in
                        // `RouteParameters` directly. For compatibility, we write it here.
@@@ -493,6 -504,7 +504,7 @@@ impl Readable for RouteParameters 
        fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
                _init_and_read_len_prefixed_tlv_fields!(reader, {
                        (0, payment_params, (required: ReadableArgs, 0)),
+                       (1, max_total_routing_fee_msat, option),
                        (2, final_value_msat, required),
                        (4, final_cltv_delta, option),
                });
                Ok(Self {
                        payment_params,
                        final_value_msat: final_value_msat.0.unwrap(),
+                       max_total_routing_fee_msat,
                })
        }
  }
@@@ -1680,6 -1693,7 +1693,7 @@@ where L::Target: Logger 
        let mut num_ignored_path_length_limit = 0;
        let mut num_ignored_cltv_delta_limit = 0;
        let mut num_ignored_previously_failed = 0;
+       let mut num_ignored_total_fee_limit = 0;
  
        macro_rules! add_entry {
                // Adds entry which goes from $src_node_id to $dest_node_id over the $candidate hop.
                                                                total_fee_msat = total_fee_msat.saturating_add(hop_use_fee_msat);
                                                        }
  
-                                                       let channel_usage = ChannelUsage {
-                                                               amount_msat: amount_to_transfer_over_msat,
-                                                               inflight_htlc_msat: used_liquidity_msat,
-                                                               effective_capacity,
-                                                       };
-                                                       let channel_penalty_msat = scid_opt.map_or(0,
-                                                               |scid| scorer.channel_penalty_msat(scid, &$src_node_id, &$dest_node_id,
-                                                                       channel_usage, score_params));
-                                                       let path_penalty_msat = $next_hops_path_penalty_msat
-                                                               .saturating_add(channel_penalty_msat);
-                                                       let new_graph_node = RouteGraphNode {
-                                                               node_id: $src_node_id,
-                                                               lowest_fee_to_node: total_fee_msat,
-                                                               total_cltv_delta: hop_total_cltv_delta,
-                                                               value_contribution_msat,
-                                                               path_htlc_minimum_msat,
-                                                               path_penalty_msat,
-                                                               path_length_to_node,
-                                                       };
-                                                       // Update the way of reaching $src_node_id with the given short_channel_id (from $dest_node_id),
-                                                       // if this way is cheaper than the already known
-                                                       // (considering the cost to "reach" this channel from the route destination,
-                                                       // the cost of using this channel,
-                                                       // and the cost of routing to the source node of this channel).
-                                                       // Also, consider that htlc_minimum_msat_difference, because we might end up
-                                                       // paying it. Consider the following exploit:
-                                                       // we use 2 paths to transfer 1.5 BTC. One of them is 0-fee normal 1 BTC path,
-                                                       // and for the other one we picked a 1sat-fee path with htlc_minimum_msat of
-                                                       // 1 BTC. Now, since the latter is more expensive, we gonna try to cut it
-                                                       // by 0.5 BTC, but then match htlc_minimum_msat by paying a fee of 0.5 BTC
-                                                       // to this channel.
-                                                       // Ideally the scoring could be smarter (e.g. 0.5*htlc_minimum_msat here),
-                                                       // but it may require additional tracking - we don't want to double-count
-                                                       // the fees included in $next_hops_path_htlc_minimum_msat, but also
-                                                       // can't use something that may decrease on future hops.
-                                                       let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat)
-                                                               .saturating_add(old_entry.path_penalty_msat);
-                                                       let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat)
-                                                               .saturating_add(path_penalty_msat);
-                                                       if !old_entry.was_processed && new_cost < old_cost {
-                                                               targets.push(new_graph_node);
-                                                               old_entry.next_hops_fee_msat = $next_hops_fee_msat;
-                                                               old_entry.hop_use_fee_msat = hop_use_fee_msat;
-                                                               old_entry.total_fee_msat = total_fee_msat;
-                                                               old_entry.node_id = $dest_node_id.clone();
-                                                               old_entry.candidate = $candidate.clone();
-                                                               old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel
-                                                               old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat;
-                                                               old_entry.path_penalty_msat = path_penalty_msat;
-                                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
-                                                               {
-                                                                       old_entry.value_contribution_msat = value_contribution_msat;
+                                                       // Ignore hops if augmenting the current path to them would put us over `max_total_routing_fee_msat`
+                                                       let max_total_routing_fee_msat = route_params.max_total_routing_fee_msat.unwrap_or(u64::max_value());
+                                                       if total_fee_msat > max_total_routing_fee_msat {
+                                                               if should_log_candidate {
+                                                                       log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate));
                                                                }
-                                                               did_add_update_path_to_src_node = Some(value_contribution_msat);
-                                                       } else if old_entry.was_processed && new_cost < old_cost {
-                                                               #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
-                                                               {
-                                                                       // If we're skipping processing a node which was previously
-                                                                       // processed even though we found another path to it with a
-                                                                       // cheaper fee, check that it was because the second path we
-                                                                       // found (which we are processing now) has a lower value
-                                                                       // contribution due to an HTLC minimum limit.
-                                                                       //
-                                                                       // e.g. take a graph with two paths from node 1 to node 2, one
-                                                                       // through channel A, and one through channel B. Channel A and
-                                                                       // B are both in the to-process heap, with their scores set by
-                                                                       // a higher htlc_minimum than fee.
-                                                                       // Channel A is processed first, and the channels onwards from
-                                                                       // node 1 are added to the to-process heap. Thereafter, we pop
-                                                                       // Channel B off of the heap, note that it has a much more
-                                                                       // restrictive htlc_maximum_msat, and recalculate the fees for
-                                                                       // all of node 1's channels using the new, reduced, amount.
-                                                                       //
-                                                                       // This would be bogus - we'd be selecting a higher-fee path
-                                                                       // with a lower htlc_maximum_msat instead of the one we'd
-                                                                       // already decided to use.
-                                                                       debug_assert!(path_htlc_minimum_msat < old_entry.path_htlc_minimum_msat);
-                                                                       debug_assert!(
-                                                                               value_contribution_msat + path_penalty_msat <
-                                                                               old_entry.value_contribution_msat + old_entry.path_penalty_msat
-                                                                       );
+                                                               num_ignored_total_fee_limit += 1;
+                                                       } else {
+                                                               let channel_usage = ChannelUsage {
+                                                                       amount_msat: amount_to_transfer_over_msat,
+                                                                       inflight_htlc_msat: used_liquidity_msat,
+                                                                       effective_capacity,
+                                                               };
+                                                               let channel_penalty_msat = scid_opt.map_or(0,
+                                                                       |scid| scorer.channel_penalty_msat(scid, &$src_node_id, &$dest_node_id,
+                                                                               channel_usage, score_params));
+                                                               let path_penalty_msat = $next_hops_path_penalty_msat
+                                                                       .saturating_add(channel_penalty_msat);
+                                                               let new_graph_node = RouteGraphNode {
+                                                                       node_id: $src_node_id,
+                                                                       lowest_fee_to_node: total_fee_msat,
+                                                                       total_cltv_delta: hop_total_cltv_delta,
+                                                                       value_contribution_msat,
+                                                                       path_htlc_minimum_msat,
+                                                                       path_penalty_msat,
+                                                                       path_length_to_node,
+                                                               };
+                                                               // Update the way of reaching $src_node_id with the given short_channel_id (from $dest_node_id),
+                                                               // if this way is cheaper than the already known
+                                                               // (considering the cost to "reach" this channel from the route destination,
+                                                               // the cost of using this channel,
+                                                               // and the cost of routing to the source node of this channel).
+                                                               // Also, consider that htlc_minimum_msat_difference, because we might end up
+                                                               // paying it. Consider the following exploit:
+                                                               // we use 2 paths to transfer 1.5 BTC. One of them is 0-fee normal 1 BTC path,
+                                                               // and for the other one we picked a 1sat-fee path with htlc_minimum_msat of
+                                                               // 1 BTC. Now, since the latter is more expensive, we gonna try to cut it
+                                                               // by 0.5 BTC, but then match htlc_minimum_msat by paying a fee of 0.5 BTC
+                                                               // to this channel.
+                                                               // Ideally the scoring could be smarter (e.g. 0.5*htlc_minimum_msat here),
+                                                               // but it may require additional tracking - we don't want to double-count
+                                                               // the fees included in $next_hops_path_htlc_minimum_msat, but also
+                                                               // can't use something that may decrease on future hops.
+                                                               let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat)
+                                                                       .saturating_add(old_entry.path_penalty_msat);
+                                                               let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat)
+                                                                       .saturating_add(path_penalty_msat);
+                                                               if !old_entry.was_processed && new_cost < old_cost {
+                                                                       targets.push(new_graph_node);
+                                                                       old_entry.next_hops_fee_msat = $next_hops_fee_msat;
+                                                                       old_entry.hop_use_fee_msat = hop_use_fee_msat;
+                                                                       old_entry.total_fee_msat = total_fee_msat;
+                                                                       old_entry.node_id = $dest_node_id.clone();
+                                                                       old_entry.candidate = $candidate.clone();
+                                                                       old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel
+                                                                       old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat;
+                                                                       old_entry.path_penalty_msat = path_penalty_msat;
+                                                                       #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
+                                                                       {
+                                                                               old_entry.value_contribution_msat = value_contribution_msat;
+                                                                       }
+                                                                       did_add_update_path_to_src_node = Some(value_contribution_msat);
+                                                               } else if old_entry.was_processed && new_cost < old_cost {
+                                                                       #[cfg(all(not(ldk_bench), any(test, fuzzing)))]
+                                                                       {
+                                                                               // If we're skipping processing a node which was previously
+                                                                               // processed even though we found another path to it with a
+                                                                               // cheaper fee, check that it was because the second path we
+                                                                               // found (which we are processing now) has a lower value
+                                                                               // contribution due to an HTLC minimum limit.
+                                                                               //
+                                                                               // e.g. take a graph with two paths from node 1 to node 2, one
+                                                                               // through channel A, and one through channel B. Channel A and
+                                                                               // B are both in the to-process heap, with their scores set by
+                                                                               // a higher htlc_minimum than fee.
+                                                                               // Channel A is processed first, and the channels onwards from
+                                                                               // node 1 are added to the to-process heap. Thereafter, we pop
+                                                                               // Channel B off of the heap, note that it has a much more
+                                                                               // restrictive htlc_maximum_msat, and recalculate the fees for
+                                                                               // all of node 1's channels using the new, reduced, amount.
+                                                                               //
+                                                                               // This would be bogus - we'd be selecting a higher-fee path
+                                                                               // with a lower htlc_maximum_msat instead of the one we'd
+                                                                               // already decided to use.
+                                                                               debug_assert!(path_htlc_minimum_msat < old_entry.path_htlc_minimum_msat);
+                                                                               debug_assert!(
+                                                                                       value_contribution_msat + path_penalty_msat <
+                                                                                       old_entry.value_contribution_msat + old_entry.path_penalty_msat
+                                                                               );
+                                                                       }
                                                                }
                                                        }
                                                }
                                        // Decrease the available liquidity of a hop in the middle of the path.
                                        let victim_candidate = &payment_path.hops[(payment_path.hops.len()) / 2].0.candidate;
                                        let exhausted = u64::max_value();
 -                                      log_trace!(logger, "Disabling route candidate {} for future path building iterations to
 -                                              avoid duplicates.", LoggedCandidateHop(victim_candidate));
 +                                      log_trace!(logger,
 +                                              "Disabling route candidate {} for future path building iterations to avoid duplicates.",
 +                                              LoggedCandidateHop(victim_candidate));
                                        *used_liquidities.entry(victim_candidate.id(false)).or_default() = exhausted;
                                        *used_liquidities.entry(victim_candidate.id(true)).or_default() = exhausted;
                                }
        }
  
        let num_ignored_total = num_ignored_value_contribution + num_ignored_path_length_limit +
-               num_ignored_cltv_delta_limit + num_ignored_previously_failed;
+               num_ignored_cltv_delta_limit + num_ignored_previously_failed + num_ignored_total_fee_limit;
        if num_ignored_total > 0 {
-               log_trace!(logger, "Ignored {} candidate hops due to insufficient value contribution, {} due to path length limit, {} due to CLTV delta limit, {} due to previous payment failure. Total: {} ignored candidates.", num_ignored_value_contribution, num_ignored_path_length_limit, num_ignored_cltv_delta_limit, num_ignored_previously_failed, num_ignored_total);
+               log_trace!(logger, "Ignored {} candidate hops due to insufficient value contribution, {} due to path length limit, {} due to CLTV delta limit, {} due to previous payment failure, {} due to maximum total fee limit. Total: {} ignored candidates.", num_ignored_value_contribution, num_ignored_path_length_limit, num_ignored_cltv_delta_limit, num_ignored_previously_failed, num_ignored_total_fee_limit, num_ignored_total);
        }
  
        // Step (5).
        // Make sure we would never create a route with more paths than we allow.
        debug_assert!(paths.len() <= payment_params.max_path_count.into());
  
+       // Make sure we would never create a route whose total fees exceed max_total_routing_fee_msat.
+       if let Some(max_total_routing_fee_msat) = route_params.max_total_routing_fee_msat {
+               if paths.iter().map(|p| p.fee_msat()).sum::<u64>() > max_total_routing_fee_msat {
+                       return Err(LightningError{err: format!("Failed to find route that adheres to the maximum total fee limit of {}msat",
+                               max_total_routing_fee_msat), action: ErrorAction::IgnoreError});
+               }
+       }
        if let Some(node_features) = payment_params.payee.node_features() {
                for path in paths.iter_mut() {
                        path.hops.last_mut().unwrap().node_features = node_features.clone();
@@@ -2705,7 -2735,7 +2736,7 @@@ fn build_route_from_hops_internal<L: De
  
        let scorer = HopScorer { our_node_id, hop_ids };
  
 -      get_route(our_node_pubkey, route_params, network_graph, None, logger, &scorer, &(), random_seed_bytes)
 +      get_route(our_node_pubkey, route_params, network_graph, None, logger, &scorer, &Default::default(), random_seed_bytes)
  }
  
  #[cfg(test)]
@@@ -2801,14 -2831,14 +2832,14 @@@ mod tests 
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 0);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes) {
 +                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "Cannot send a payment of 0 msat");
                } else { panic!(); }
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
                        &route_params, &network_graph.read_only(), Some(&our_chans.iter().collect::<Vec<_>>()),
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "First hop cannot have our_node_pubkey as a destination.");
                } else { panic!(); }
   
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 199_999_999);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes) {
 +                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
  
  
                // A payment above the minimum should pass
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
        }
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 60_000);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                // Overpay fees to hit htlc_minimum_msat.
                let overpaid_fees = route.paths[0].hops[0].fee_msat + route.paths[1].hops[0].fee_msat;
                // TODO: this could be better balanced to overpay 10k and not 15k.
                });
  
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                // Fine to overpay for htlc_minimum_msat if it allows us to save fee.
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops[0].short_channel_id, 12);
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 50_000);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                // Not fine to overpay for htlc_minimum_msat if it requires paying more than fee on
                // the other channel.
                assert_eq!(route.paths.len(), 1);
                // If all the channels require some features we don't understand, route should fail
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes) {
 +                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
  
                let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
                        InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
                let route = get_route(&our_id, &route_params, &network_graph.read_only(),
 -                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
                // If all nodes require some features we don't understand, route should fail
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes) {
 +                      &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!(); }
  
                let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
                        InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
                let route = get_route(&our_id, &route_params, &network_graph.read_only(),
 -                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
                let payment_params = PaymentParameters::from_node_id(nodes[0], 42);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 3);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                let our_chans = vec![get_channel_details(Some(42), nodes[7].clone(),
                        InitFeatures::from_le_bytes(vec![0b11]), 250_000_000)];
                let route = get_route(&our_id, &route_params, &network_graph.read_only(),
 -                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[7]);
                                .with_route_hints(invalid_last_hops).unwrap();
                        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                              &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer, &(),
 -                              &random_seed_bytes) {
 +                              &route_params, &network_graph.read_only(), None, Arc::clone(&logger), &scorer,
 +                              &Default::default(), &random_seed_bytes) {
                                        assert_eq!(err, "Route hint cannot have the payee as the source.");
                        } else { panic!(); }
                }
                        .with_route_hints(last_hops_multi_private_channels(&nodes)).unwrap();
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                // Test handling of an empty RouteHint passed in Invoice.
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &[42u8; 32]).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &[42u8; 32]).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                        .with_route_hints(last_hops.clone()).unwrap();
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(),
 -                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 2);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[3]);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 4);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                // ...but still use 8 for larger payments as 6 has a variable feerate
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 2000);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths[0].hops.len(), 5);
  
                assert_eq!(route.paths[0].hops[0].pubkey, nodes[1]);
                let network_graph = NetworkGraph::new(Network::Testnet, &logger);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, route_val);
                let route = get_route(&source_node_id, &route_params, &network_graph.read_only(),
 -                              Some(&our_chans.iter().collect::<Vec<_>>()), &logger, &scorer, &(),
 +                              Some(&our_chans.iter().collect::<Vec<_>>()), &logger, &scorer, &Default::default(),
                                &random_seed_bytes);
                route
        }
                                payment_params.clone(), 250_000_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None,
 -                                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
 +                                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 250_000_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(),&random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(),
                                        Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 -                                      &(), &random_seed_bytes) {
 +                                      &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 200_000_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(),
 -                              Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                              &random_seed_bytes).unwrap();
 +                              Some(&our_chans.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                              &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
                                payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 15_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
                                payment_params.clone(), 15_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 15_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
                                payment_params.clone(), 10_001);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 10_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let path = route.paths.last().unwrap();
                        assert_eq!(path.hops.len(), 2);
                                payment_params.clone(), 60_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 49_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 50_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 50_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                                payment_params.clone(), 300_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                &our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                                zero_payment_params, 100);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                &our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                        assert_eq!(err, "Can't find a route with no paths allowed.");
                        } else { panic!(); }
                }
                                fail_payment_params, 250_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                &our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes) {
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes) {
                                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 250_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 290_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                                payment_params.clone(), 350_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 300_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
  
                        let mut total_amount_paid_msat = 0;
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 180_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
  
                        let mut total_value_transferred_msat = 0;
                                payment_params.clone(), 210_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
  
+               {
+                       // Attempt to route while setting max_total_routing_fee_msat to 149_999 results in a failure.
+                       let route_params = RouteParameters { payment_params: payment_params.clone(), final_value_msat: 200_000,
+                               max_total_routing_fee_msat: Some(149_999) };
+                       if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
+                               &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
+                               &scorer, &Default::default(), &random_seed_bytes) {
+                                       assert_eq!(err, "Failed to find a sufficient route to the given destination");
+                       } else { panic!(); }
+               }
                {
                        // Now, attempt to route 200 sats (exact amount we can route).
-                       let route_params = RouteParameters::from_payment_params_and_value(payment_params, 200_000);
+                       let route_params = RouteParameters { payment_params: payment_params.clone(), final_value_msat: 200_000,
+                               max_total_routing_fee_msat: Some(150_000) };
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
  
                        let mut total_amount_paid_msat = 0;
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 100_000);
                let mut route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                route.paths.sort_by_key(|path| path.hops[0].short_channel_id);
                // Paths are manually ordered by SCID, so:
                                payment_params.clone(), 150_000);
                        if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(
                                        &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger),
 -                                      &scorer, &(), &random_seed_bytes) {
 +                                      &scorer, &Default::default(), &random_seed_bytes) {
                                                assert_eq!(err, "Failed to find a sufficient route to the given destination");
                        } else { panic!(); }
                }
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 125_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 3);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 90_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        let mut total_amount_paid_msat = 0;
                        for path in &route.paths {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 10_000);
                        let route = get_route(&our_id, &route_params, &network.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 3);
  
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 90_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
  
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params, 90_000);
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 2);
  
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 200_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 10_000),
 -                      ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      ]), Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
  
                        let route = get_route(&our_id, &route_params, &network_graph.read_only(), Some(&[
                                &get_channel_details(Some(3), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(2), nodes[0], channelmanager::provided_init_features(&config), 50_000),
 -                      ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      ]), Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 2);
                        assert_eq!(route.paths[0].hops.len(), 1);
                        assert_eq!(route.paths[1].hops.len(), 1);
                                &get_channel_details(Some(8), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(9), nodes[0], channelmanager::provided_init_features(&config), 50_000),
                                &get_channel_details(Some(4), nodes[0], channelmanager::provided_init_features(&config), 1_000_000),
 -                      ]), Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      ]), Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                        assert_eq!(route.paths.len(), 1);
                        assert_eq!(route.paths[0].hops.len(), 1);
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 100);
                let route = get_route( &our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
  
                assert_eq!(route.get_total_fees(), 100);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 100);
                let route = get_route( &our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
  
                assert_eq!(route.get_total_fees(), 300);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 100);
                let route = get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
  
                assert_eq!(route.get_total_fees(), 100);
                // A different path to nodes[6] exists if channel 6 cannot be routed over.
                let scorer = BadChannelScorer { short_channel_id: 6 };
                let route = get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
  
                assert_eq!(route.get_total_fees(), 300);
                // A path to nodes[6] does not exist if nodes[2] cannot be routed through.
                let scorer = BadNodeScorer { node_id: NodeId::from_pubkey(&nodes[2]) };
                match get_route( &our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes) {
 +                      &scorer, &Default::default(), &random_seed_bytes) {
                                Err(LightningError { err, .. } ) => {
                                        assert_eq!(err, "Failed to find a path to the given destination");
                                },
                let route_params = RouteParameters::from_payment_params_and_value(
                        feasible_payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert_ne!(path.len(), 0);
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        fail_payment_params, 100);
                match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
 -                      &(), &random_seed_bytes)
 +                      &Default::default(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 100);
                assert!(get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).is_ok());
 +                      &scorer, &Default::default(), &random_seed_bytes).is_ok());
                loop {
                        let route_params = RouteParameters::from_payment_params_and_value(
                                payment_params.clone(), 100);
                        if let Ok(route) = get_route(&our_id, &route_params, &network_graph, None,
 -                              Arc::clone(&logger), &scorer, &(), &random_seed_bytes)
 +                              Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes)
                        {
                                for chan in route.paths[0].hops.iter() {
                                        assert!(!payment_params.previously_failed_channels.contains(&chan.short_channel_id));
                let route_params = RouteParameters::from_payment_params_and_value(
                        feasible_payment_params, 100);
                let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                let path = route.paths[0].hops.iter().map(|hop| hop.short_channel_id).collect::<Vec<_>>();
                assert!(path.len() == MAX_PATH_LENGTH_ESTIMATE.into());
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        fail_payment_params, 100);
                match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
 -                      &(), &random_seed_bytes)
 +                      &Default::default(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. } ) => {
                                assert_eq!(err, "Failed to find a path to the given destination");
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 100);
                let route = get_route(&our_id, &route_params, &network_graph.read_only(), None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
  
                let cltv_expiry_deltas_before = route.paths[0].hops.iter().map(|h| h.cltv_expiry_delta).collect::<Vec<u32>>();
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), 100);
                let mut route = get_route(&our_id, &route_params, &network_graph, None,
 -                      Arc::clone(&logger), &scorer, &(), &random_seed_bytes).unwrap();
 +                      Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap();
                add_random_cltv_offset(&mut route, &payment_params, &network_graph, &random_seed_bytes);
  
                let mut path_plausibility = vec![];
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, max_htlc_msat + 1);
                if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id,
 -                      &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &(),
 +                      &route_params, &netgraph, None, Arc::clone(&logger), &scorer, &Default::default(),
                        &random_seed_bytes)
                {
                        assert_eq!(err, "Failed to find a sufficient route to the given destination");
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, max_htlc_msat + 1);
                let route = get_route(&our_id, &route_params, &netgraph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, amt_msat);
                let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
 -                      Some(&first_hop.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&first_hop.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
                        get_channel_details(Some(43), intermed_node_id, InitFeatures::from_le_bytes(vec![0b11]), amt_msat - 10),
                ];
                let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
 -                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, amt_msat);
                let route = get_route(&our_node_id, &route_params, &network_graph.read_only(),
 -                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                assert!(route.paths[0].hops.last().unwrap().fee_msat <= max_htlc_msat);
                assert!(route.paths[1].hops.last().unwrap().fee_msat <= max_htlc_msat);
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, 1001);
                let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 1);
                assert_eq!(route.paths[0].hops.len(), 2);
  
                        (blinded_payinfo.clone(), invalid_blinded_path_2)]);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
                match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes)
 +                      &scorer, &Default::default(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "1-hop blinded paths must all have matching introduction node ids");
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo.clone(), invalid_blinded_path.clone())]);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
                match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
 -                      &(), &random_seed_bytes)
 +                      &Default::default(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "Cannot generate a route to blinded paths if we are the introduction node to all of them");
                let payment_params = PaymentParameters::blinded(vec![(blinded_payinfo, invalid_blinded_path)]);
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1001);
                match get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger), &scorer,
 -                      &(), &random_seed_bytes)
 +                      &Default::default(), &random_seed_bytes)
                {
                        Err(LightningError { err, .. }) => {
                                assert_eq!(err, "0-hop blinded path provided");
  
                let route_params = RouteParameters::from_payment_params_and_value(payment_params, 100_000);
                let route = get_route(&our_id, &route_params, &network_graph, None, Arc::clone(&logger),
 -                      &scorer, &(), &random_seed_bytes).unwrap();
 +                      &scorer, &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.paths.len(), 2);
                let mut total_amount_paid_msat = 0;
                for path in route.paths.into_iter() {
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params.clone(), amt_msat);
                if let Err(LightningError { err, .. }) = get_route(&nodes[0], &route_params, &netgraph,
 -                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes) {
 +                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes) {
                                assert_eq!(err, "Failed to find a path to the given destination");
                } else { panic!("Expected error") }
  
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, amt_minus_blinded_path_fee);
                let route = get_route(&nodes[0], &route_params, &netgraph,
 -                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_minus_blinded_path_fee);
        }
                let route_params = RouteParameters::from_payment_params_and_value(
                        payment_params, amt_msat);
                let route = get_route(&nodes[0], &route_params, &netgraph,
 -                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer, &(),
 -                      &random_seed_bytes).unwrap();
 +                      Some(&first_hops.iter().collect::<Vec<_>>()), Arc::clone(&logger), &scorer,
 +                      &Default::default(), &random_seed_bytes).unwrap();
                assert_eq!(route.get_total_fees(), blinded_payinfo.fee_base_msat as u64);
                assert_eq!(route.get_total_amount(), amt_msat);
        }
@@@ -7320,15 -7362,15 +7363,15 @@@ pub mod benches 
                let logger = TestLogger::new();
                let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let scorer = FixedPenaltyScorer::with_penalty(0);
 -              generate_routes(bench, &network_graph, scorer, &(), Bolt11InvoiceFeatures::empty(), 0,
 -                      "generate_routes_with_zero_penalty_scorer");
 +              generate_routes(bench, &network_graph, scorer, &Default::default(),
 +                      Bolt11InvoiceFeatures::empty(), 0, "generate_routes_with_zero_penalty_scorer");
        }
  
        pub fn generate_mpp_routes_with_zero_penalty_scorer(bench: &mut Criterion) {
                let logger = TestLogger::new();
                let network_graph = bench_utils::read_network_graph(&logger).unwrap();
                let scorer = FixedPenaltyScorer::with_penalty(0);
 -              generate_routes(bench, &network_graph, scorer, &(),
 +              generate_routes(bench, &network_graph, scorer, &Default::default(),
                        channelmanager::provided_invoice_features(&UserConfig::default()), 0,
                        "generate_mpp_routes_with_zero_penalty_scorer");
        }
                        "generate_large_mpp_routes_with_probabilistic_scorer");
        }
  
 +      pub fn generate_routes_with_nonlinear_probabilistic_scorer(bench: &mut Criterion) {
 +              let logger = TestLogger::new();
 +              let network_graph = bench_utils::read_network_graph(&logger).unwrap();
 +              let mut params = ProbabilisticScoringFeeParameters::default();
 +              params.linear_success_probability = false;
 +              let scorer = ProbabilisticScorer::new(
 +                      ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
 +              generate_routes(bench, &network_graph, scorer, &params,
 +                      channelmanager::provided_invoice_features(&UserConfig::default()), 0,
 +                      "generate_routes_with_nonlinear_probabilistic_scorer");
 +      }
 +
 +      pub fn generate_mpp_routes_with_nonlinear_probabilistic_scorer(bench: &mut Criterion) {
 +              let logger = TestLogger::new();
 +              let network_graph = bench_utils::read_network_graph(&logger).unwrap();
 +              let mut params = ProbabilisticScoringFeeParameters::default();
 +              params.linear_success_probability = false;
 +              let scorer = ProbabilisticScorer::new(
 +                      ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
 +              generate_routes(bench, &network_graph, scorer, &params,
 +                      channelmanager::provided_invoice_features(&UserConfig::default()), 0,
 +                      "generate_mpp_routes_with_nonlinear_probabilistic_scorer");
 +      }
 +
 +      pub fn generate_large_mpp_routes_with_nonlinear_probabilistic_scorer(bench: &mut Criterion) {
 +              let logger = TestLogger::new();
 +              let network_graph = bench_utils::read_network_graph(&logger).unwrap();
 +              let mut params = ProbabilisticScoringFeeParameters::default();
 +              params.linear_success_probability = false;
 +              let scorer = ProbabilisticScorer::new(
 +                      ProbabilisticScoringDecayParameters::default(), &network_graph, &logger);
 +              generate_routes(bench, &network_graph, scorer, &params,
 +                      channelmanager::provided_invoice_features(&UserConfig::default()), 100_000_000,
 +                      "generate_large_mpp_routes_with_nonlinear_probabilistic_scorer");
 +      }
 +
        fn generate_routes<S: ScoreLookUp + ScoreUpdate>(
                bench: &mut Criterion, graph: &NetworkGraph<&TestLogger>, mut scorer: S,
                score_params: &S::ScoreParams, features: Bolt11InvoiceFeatures, starting_amount: u64,