Merge pull request #2590 from TheBlueMatt/2023-09-default-score-params
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
          Thu, 21 Sep 2023 20:40:13 +0000 (20:40 +0000)
committer GitHub <noreply@github.com>
          Thu, 21 Sep 2023 20:40:13 +0000 (20:40 +0000)
Use `Default::default()` to construct `()` as a test scoring param
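
Background: the scorer used in the routing tests below has the unit type `()` as its score-parameters type, so `&()` and `&Default::default()` name the same value at those call sites, and the latter keeps compiling unchanged if the parameter type ever grows real fields. A minimal, self-contained sketch of that equivalence, using a deliberately simplified stand-in trait and scorer (not LDK's real scoring API):

    // Hypothetical, simplified stand-ins for illustration only.
    trait Score {
        type ScoreParams: Default;
        fn channel_penalty_msat(&self, short_channel_id: u64, params: &Self::ScoreParams) -> u64;
    }

    struct TestScorer;
    impl Score for TestScorer {
        // The test scorer takes no parameters, so the associated type is `()`.
        type ScoreParams = ();
        fn channel_penalty_msat(&self, _scid: u64, _params: &Self::ScoreParams) -> u64 { 0 }
    }

    fn main() {
        let scorer = TestScorer;
        // `&()` and `&Default::default()` are interchangeable here; the latter needs no
        // edits if `ScoreParams` is later switched to a struct implementing `Default`.
        assert_eq!(scorer.channel_penalty_msat(42, &Default::default()), 0);
    }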

lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/payment_tests.rs

index 8b524d8f3f4d81647f57853b618124960fd8ff84,2a16a76228fca8f385b9094754d09d2585627f52..e553e8e534055ae9cf3a8d64e0acc83aa04e30a6
@@@ -923,14 -923,12 +923,14 @@@ wher
  /// called [`funding_transaction_generated`] for outbound channels) being closed.
  ///
  /// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
 -/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
 -/// returning from [`chain::Watch::watch_channel`]/[`update_channel`], with ChannelManagers, writing updates
 -/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
 -/// the serialization process). If the deserialized version is out-of-date compared to the
 -/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
 -/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
 +/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
 +/// [`ChannelMonitorUpdate`] before returning from
 +/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
 +/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
 +/// `ChannelManager` operations from occurring during the serialization process). If the
 +/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
 +/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
 +/// will be lost (modulo on-chain transaction fees).
  ///
  /// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
  /// tells you the last block hash which was connected. You should get the best block tip before using the manager.
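
The rewritten paragraph above draws the durability distinction: each `ChannelMonitorUpdate` must be written out durably before `watch_channel`/`update_channel` return (or before an async write is reported complete), while `ChannelManager` snapshots may be written out-of-band, at worst forcing stale channels closed on reload. A minimal sketch of that ordering, with a hypothetical `write_and_fsync` helper standing in for whatever storage backend is used (this is not any real LDK persister trait):

    use std::io::{self, Write};

    // Hypothetical durable-write helper: returns only once the bytes are fsync'd to disk.
    fn write_and_fsync(path: &str, bytes: &[u8]) -> io::Result<()> {
        let mut f = std::fs::File::create(path)?;
        f.write_all(bytes)?;
        f.sync_all()
    }

    // Monitor updates must be durable *before* success is reported; a lost update can
    // mean lost funds. ChannelManager snapshots, by contrast, may be flushed later.
    fn persist_monitor_update(path: &str, serialized_update: &[u8]) -> io::Result<()> {
        write_and_fsync(path, serialized_update)
        // On Ok: safe to report the update as Completed. On Err: either keep retrying
        // asynchronously (InProgress) or treat the failure as unrecoverable.
    }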
@@@ -2042,30 -2040,56 +2042,30 @@@ macro_rules! handle_monitor_update_comp
  }
  
  macro_rules! handle_new_monitor_update {
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, _internal, $remove: expr, $completed: expr) => { {
 -              // update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
 -              // any case so that it won't deadlock.
 -              debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
 +      ($self: ident, $update_res: expr, $chan: expr, _internal, $completed: expr) => { {
                debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
                match $update_res {
 +                      ChannelMonitorUpdateStatus::UnrecoverableError => {
 +                              let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
 +                              log_error!($self.logger, "{}", err_str);
 +                              panic!("{}", err_str);
 +                      },
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
                                        &$chan.context.channel_id());
 -                              Ok(false)
 -                      },
 -                      ChannelMonitorUpdateStatus::PermanentFailure => {
 -                              log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
 -                                      &$chan.context.channel_id());
 -                              update_maps_on_chan_removal!($self, &$chan.context);
 -                              let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 -                                      "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 -                                      $chan.context.get_user_id(), $chan.context.force_shutdown(false),
 -                                      $self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
 -                              $remove;
 -                              res
 +                              false
                        },
                        ChannelMonitorUpdateStatus::Completed => {
                                $completed;
 -                              Ok(true)
 +                              true
                        },
                }
        } };
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING_INITIAL_MONITOR, $remove: expr) => {
 -              handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
 -                      $per_peer_state_lock, $chan, _internal, $remove,
 +      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
 +              handle_new_monitor_update!($self, $update_res, $chan, _internal,
                        handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
        };
 -      ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
 -              if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
 -                      handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
 -                              $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
 -              } else {
 -                      // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
 -                      // update).
 -                      debug_assert!(false);
 -                      let channel_id = *$chan_entry.key();
 -                      let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
 -                              "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
 -                              $chan_entry.get_mut(), &channel_id);
 -                      $chan_entry.remove();
 -                      Err(err)
 -              }
 -      };
 -      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 +      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
                let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
                        .or_insert_with(Vec::new);
                // During startup, we push monitor updates as background events through to here in
                                in_flight_updates.len() - 1
                        });
                let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]);
 -              handle_new_monitor_update!($self, update_res, $peer_state_lock, $peer_state,
 -                      $per_peer_state_lock, $chan, _internal, $remove,
 +              handle_new_monitor_update!($self, update_res, $chan, _internal,
                        {
                                let _ = in_flight_updates.remove(idx);
                                if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
                                }
                        })
        } };
 -      ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
 -              if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
 -                      handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
 -                              $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
 -              } else {
 -                      // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
 -                      // update).
 -                      debug_assert!(false);
 -                      let channel_id = *$chan_entry.key();
 -                      let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
 -                              "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
 -                              $chan_entry.get_mut(), &channel_id);
 -                      $chan_entry.remove();
 -                      Err(err)
 -              }
 -      }
  }
  
  macro_rules! process_events_body {
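
With persistence failures now handled inside the macro itself (`UnrecoverableError` logs and panics rather than bubbling up, and `PermanentFailure` is gone), callers only see a two-way outcome: the update is either already durable or still in flight. A rough sketch of the resulting call-site shape, using a plain function and a locally defined enum standing in for `ChannelMonitorUpdateStatus` so the example stays self-contained:

    // Local stand-in for ChannelMonitorUpdateStatus, defined only to keep this sketch
    // self-contained.
    enum MonitorUpdateStatus {
        Completed,
        InProgress,
        UnrecoverableError,
    }

    // Mirrors the rewritten macro: true if the update is already durable, false if it is
    // still in flight (messages are held until it completes), and never returns on an
    // unrecoverable persistence failure.
    fn monitor_update_complete(status: MonitorUpdateStatus) -> bool {
        match status {
            MonitorUpdateStatus::Completed => true,
            MonitorUpdateStatus::InProgress => false,
            MonitorUpdateStatus::UnrecoverableError =>
                panic!("ChannelMonitor[Update] persistence failed unrecoverably"),
        }
    }

    // Call sites no longer thread a Result around; they just branch on the bool.
    fn example_call_site(status: MonitorUpdateStatus) {
        if !monitor_update_complete(status) {
            // Still in flight: defer the remaining work until completion is signalled
            // (the diff does this via handle_monitor_update_completion!).
        }
    }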
@@@ -2497,64 -2538,61 +2497,64 @@@ wher
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
  
                let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
 -              let result: Result<(), _> = loop {
 -                      {
 -                              let per_peer_state = self.per_peer_state.read().unwrap();
 +              loop {
 +                      let per_peer_state = self.per_peer_state.read().unwrap();
  
 -                              let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 -                                      .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 +                      let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 +                              .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
  
 -                              let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 -                              let peer_state = &mut *peer_state_lock;
 +                      let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 +                      let peer_state = &mut *peer_state_lock;
  
 -                              match peer_state.channel_by_id.entry(channel_id.clone()) {
 -                                      hash_map::Entry::Occupied(mut chan_phase_entry) => {
 -                                              if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 -                                                      let funding_txo_opt = chan.context.get_funding_txo();
 -                                                      let their_features = &peer_state.latest_features;
 -                                                      let (shutdown_msg, mut monitor_update_opt, htlcs) =
 -                                                              chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 -                                                      failed_htlcs = htlcs;
 +                      match peer_state.channel_by_id.entry(channel_id.clone()) {
 +                              hash_map::Entry::Occupied(mut chan_phase_entry) => {
 +                                      if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 +                                              let funding_txo_opt = chan.context.get_funding_txo();
 +                                              let their_features = &peer_state.latest_features;
 +                                              let (shutdown_msg, mut monitor_update_opt, htlcs) =
 +                                                      chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 +                                              failed_htlcs = htlcs;
 +
 +                                              // We can send the `shutdown` message before updating the `ChannelMonitor`
 +                                              // here as we don't need the monitor update to complete until we send a
 +                                              // `shutdown_signed`, which we'll delay if we're pending a monitor update.
 +                                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 +                                                      node_id: *counterparty_node_id,
 +                                                      msg: shutdown_msg,
 +                                              });
  
 -                                                      // We can send the `shutdown` message before updating the `ChannelMonitor`
 -                                                      // here as we don't need the monitor update to complete until we send a
 -                                                      // `shutdown_signed`, which we'll delay if we're pending a monitor update.
 -                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 -                                                              node_id: *counterparty_node_id,
 -                                                              msg: shutdown_msg,
 -                                                      });
 +                                              debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
 +                                                      "We can't both complete shutdown and generate a monitor update");
  
 -                                                      // Update the monitor with the shutdown script if necessary.
 -                                                      if let Some(monitor_update) = monitor_update_opt.take() {
 -                                                              break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 -                                                                      peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
 -                                                      }
 +                                              // Update the monitor with the shutdown script if necessary.
 +                                              if let Some(monitor_update) = monitor_update_opt.take() {
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
 +                                                      break;
 +                                              }
  
 -                                                      if chan.is_shutdown() {
 -                                                              if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
 -                                                                      if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
 -                                                                              peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                                                      msg: channel_update
 -                                                                              });
 -                                                                      }
 -                                                                      self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 +                                              if chan.is_shutdown() {
 +                                                      if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
 +                                                              if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
 +                                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                              msg: channel_update
 +                                                                      });
                                                                }
 +                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                        }
 -                                                      break Ok(());
                                                }
 -                                      },
 -                                      hash_map::Entry::Vacant(_) => (),
 -                              }
 +                                              break;
 +                                      }
 +                              },
 +                              hash_map::Entry::Vacant(_) => {
 +                                      // If we reach this point, it means that the channel_id either refers to an unfunded channel or
 +                                      // it does not exist for this peer. Either way, we can attempt to force-close it.
 +                                      //
 +                                      // An appropriate error will be returned for non-existence of the channel if that's the case.
 +                                      return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 +                              },
                        }
 -                      // If we reach this point, it means that the channel_id either refers to an unfunded channel or
 -                      // it does not exist for this peer. Either way, we can attempt to force-close it.
 -                      //
 -                      // An appropriate error will be returned for non-existence of the channel if that's the case.
 -                      return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
 -              };
 +              }
  
                for htlc_source in failed_htlcs.drain(..) {
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
  
 -              let _ = handle_error!(self, result, *counterparty_node_id);
                Ok(())
        }
  
                                                        }, onion_packet, None, &self.fee_estimator, &self.logger);
                                                match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
                                                        Some(monitor_update) => {
 -                                                              match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
 -                                                                      Err(e) => break Err(e),
 -                                                                      Ok(false) => {
 +                                                              match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 +                                                                      false => {
                                                                                // Note that MonitorUpdateInProgress here indicates (per function
                                                                                // docs) that we will resend the commitment update once monitor
                                                                                // updating completes. Therefore, we must return an error
                                                                                // MonitorUpdateInProgress, below.
                                                                                return Err(APIError::MonitorUpdateInProgress);
                                                                        },
 -                                                                      Ok(true) => {},
 +                                                                      true => {},
                                                                }
                                                        },
                                                        None => {},
                                },
                                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
                                        let mut updated_chan = false;
 -                                      let res = {
 +                                      {
                                                let per_peer_state = self.per_peer_state.read().unwrap();
                                                if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
                                                        let mut peer_state_lock = peer_state_mutex.lock().unwrap();
                                                        let peer_state = &mut *peer_state_lock;
                                                        match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
                                                                hash_map::Entry::Occupied(mut chan_phase) => {
 -                                                                      updated_chan = true;
 -                                                                      handle_new_monitor_update!(self, funding_txo, update.clone(),
 -                                                                              peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
 +                                                                      if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
 +                                                                              updated_chan = true;
 +                                                                              handle_new_monitor_update!(self, funding_txo, update.clone(),
 +                                                                                      peer_state_lock, peer_state, per_peer_state, chan);
 +                                                                      } else {
 +                                                                              debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 +                                                                      }
                                                                },
 -                                                              hash_map::Entry::Vacant(_) => Ok(()),
 +                                                              hash_map::Entry::Vacant(_) => {},
                                                        }
 -                                              } else { Ok(()) }
 -                                      };
 +                                              }
 +                                      }
                                        if !updated_chan {
                                                // TODO: Track this as in-flight even though the channel is closed.
                                                let _ = self.chain_monitor.update_channel(funding_txo, &update);
                                        }
 -                                      // TODO: If this channel has since closed, we're likely providing a payment
 -                                      // preimage update, which we must ensure is durable! We currently don't,
 -                                      // however, ensure that.
 -                                      if res.is_err() {
 -                                              log_error!(self.logger,
 -                                                      "Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
 -                                      }
 -                                      let _ = handle_error!(self, res, counterparty_node_id);
                                },
                                BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
                                        let per_peer_state = self.per_peer_state.read().unwrap();
                                                                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
                                                        }
                                                        if !during_init {
 -                                                              let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 -                                                                      peer_state, per_peer_state, chan_phase_entry);
 -                                                              if let Err(e) = res {
 -                                                                      // TODO: This is a *critical* error - we probably updated the outbound edge
 -                                                                      // of the HTLC's monitor with a preimage. We should retry this monitor
 -                                                                      // update over and over again until morale improves.
 -                                                                      log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
 -                                                                      return Err((counterparty_node_id, e));
 -                                                              }
 +                                                              handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 +                                                                      peer_state, per_peer_state, chan);
                                                        } else {
                                                                // If we're running during init we cannot update a monitor directly -
                                                                // they probably haven't actually been loaded yet. Instead, push the
                                Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
 -                              match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
 +                              let mut id_to_peer_lock = self.id_to_peer.lock().unwrap();
 +                              match id_to_peer_lock.entry(chan.context.channel_id()) {
                                        hash_map::Entry::Occupied(_) => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close(
                                                        "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
                                                        funding_msg.channel_id))
                                        },
                                        hash_map::Entry::Vacant(i_e) => {
 -                                              i_e.insert(chan.context.get_counterparty_node_id());
 -                                      }
 -                              }
 -
 -                              // There's no problem signing a counterparty's funding transaction if our monitor
 -                              // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 -                              // accepted payment from yet. We do, however, need to wait to send our channel_ready
 -                              // until we have persisted our monitor.
 -                              let new_channel_id = funding_msg.channel_id;
 -                              peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 -                                      node_id: counterparty_node_id.clone(),
 -                                      msg: funding_msg,
 -                              });
 +                                              let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 +                                              if let Ok(persist_state) = monitor_res {
 +                                                      i_e.insert(chan.context.get_counterparty_node_id());
 +                                                      mem::drop(id_to_peer_lock);
 +
 +                                                      // There's no problem signing a counterparty's funding transaction if our monitor
 +                                                      // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
 +                                                      // accepted payment from yet. We do, however, need to wait to send our channel_ready
 +                                                      // until we have persisted our monitor.
 +                                                      peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
 +                                                              node_id: counterparty_node_id.clone(),
 +                                                              msg: funding_msg,
 +                                                      });
  
 -                              let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
 -
 -                              if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 -                                      let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
 -                                              per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 -                                              { peer_state.channel_by_id.remove(&new_channel_id) });
 -
 -                                      // Note that we reply with the new channel_id in error messages if we gave up on the
 -                                      // channel, not the temporary_channel_id. This is compatible with ourselves, but the
 -                                      // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
 -                                      // any messages referencing a previously-closed channel anyway.
 -                                      // We do not propagate the monitor update to the user as it would be for a monitor
 -                                      // that we didn't manage to store (and that we don't care about - we don't respond
 -                                      // with the funding_signed so the channel can never go on chain).
 -                                      if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
 -                                              res.0 = None;
 +                                                      if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
 +                                                              handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 +                                                                      per_peer_state, chan, INITIAL_MONITOR);
 +                                                      } else {
 +                                                              unreachable!("This must be a funded channel as we just inserted it.");
 +                                                      }
 +                                                      Ok(())
 +                                              } else {
 +                                                      log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
 +                                                      return Err(MsgHandleErrInternal::send_err_msg_no_close(
 +                                                              "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
 +                                                              funding_msg.channel_id));
 +                                              }
                                        }
 -                                      res.map(|_| ())
 -                              } else {
 -                                      unreachable!("This must be a funded channel as we just inserted it.");
                                }
                        }
                }
                                        ChannelPhase::Funded(ref mut chan) => {
                                                let monitor = try_chan_phase_entry!(self,
                                                        chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 -                                              let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
 -                                              let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
 -                                              if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
 -                                                      // We weren't able to watch the channel to begin with, so no updates should be made on
 -                                                      // it. Previously, full_stack_target found an (unreachable) panic when the
 -                                                      // monitor update contained within `shutdown_finish` was applied.
 -                                                      if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
 -                                                              shutdown_finish.0.take();
 -                                                      }
 +                                              if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
 +                                                      handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 +                                                      Ok(())
 +                                              } else {
 +                                                      try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
                                                }
 -                                              res.map(|_| ())
                                        },
                                        _ => {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
  
        fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
                let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
 -              let result: Result<(), _> = loop {
 +              {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let peer_state_mutex = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                }
                                                // Update the monitor with the shutdown script if necessary.
                                                if let Some(monitor_update) = monitor_update_opt {
 -                                                      break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
 +                                                      handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
                                                }
 -                                              break Ok(());
                                        },
                                        ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
                                                let context = phase.context_mut();
                        } else {
                                return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
                        }
 -              };
 +              }
                for htlc_source in dropped_htlcs.drain(..) {
                        let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
                        let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
                        self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
                }
  
 -              result
 +              Ok(())
        }
  
        fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
                                        let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
                                        if let Some(monitor_update) = monitor_update_opt {
                                                handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
 -                                                      peer_state, per_peer_state, chan_phase_entry).map(|_| ())
 -                                      } else { Ok(()) }
 +                                                      peer_state, per_peer_state, chan);
 +                                      }
 +                                      Ok(())
                                } else {
                                        return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
        }
  
        fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 -              let (htlcs_to_fail, res) = {
 +              let htlcs_to_fail = {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
                                .ok_or_else(|| {
                                                } else { false };
                                                let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
                                                        chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
 -                                              let res = if let Some(monitor_update) = monitor_update_opt {
 +                                              if let Some(monitor_update) = monitor_update_opt {
                                                        let funding_txo = funding_txo_opt
                                                                .expect("Funding outpoint must have been set for RAA handling to succeed");
                                                        handle_new_monitor_update!(self, funding_txo, monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
 -                                              } else { Ok(()) };
 -                                              (htlcs_to_fail, res)
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
 +                                              }
 +                                              htlcs_to_fail
                                        } else {
                                                return try_chan_phase_entry!(self, Err(ChannelError::Close(
                                                        "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
                        }
                };
                self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
 -              res
 +              Ok(())
        }
  
        fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
                                                        self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
                                                }
                                        },
 -                                      MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
 -                                      MonitorEvent::UpdateFailed(funding_outpoint) => {
 +                                      MonitorEvent::HolderForceClosed(funding_outpoint) => {
                                                let counterparty_node_id_opt = match counterparty_node_id {
                                                        Some(cp_id) => Some(cp_id),
                                                        None => {
                                                                                                msg: update
                                                                                        });
                                                                                }
 -                                                                              let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
 -                                                                                      ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
 -                                                                              } else {
 -                                                                                      ClosureReason::CommitmentTxConfirmed
 -                                                                              };
 -                                                                              self.issue_channel_close_events(&chan.context, reason);
 +                                                                              self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
                                                                                pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                                                        node_id: chan.context.get_counterparty_node_id(),
                                                                                        action: msgs::ErrorAction::SendErrorMessage {
        fn check_free_holding_cells(&self) -> bool {
                let mut has_monitor_update = false;
                let mut failed_htlcs = Vec::new();
 -              let mut handle_errors = Vec::new();
  
                // Walk our list of channels and find any that need to update. Note that when we do find an
                // update, if it includes actions that must be taken afterwards, we have to drop the
                                                if let Some(monitor_update) = monitor_opt {
                                                        has_monitor_update = true;
  
 -                                                      let channel_id: ChannelId = *channel_id;
 -                                                      let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 -                                                              peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
 -                                                              peer_state.channel_by_id.remove(&channel_id));
 -                                                      if res.is_err() {
 -                                                              handle_errors.push((counterparty_node_id, res));
 -                                                      }
 +                                                      handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 +                                                              peer_state_lock, peer_state, per_peer_state, chan);
                                                        continue 'peer_loop;
                                                }
                                        }
                        break 'peer_loop;
                }
  
 -              let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
 +              let has_update = has_monitor_update || !failed_htlcs.is_empty();
                for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
                        self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
                }
  
 -              for (counterparty_node_id, err) in handle_errors.drain(..) {
 -                      let _ = handle_error!(self, err, counterparty_node_id);
 -              }
 -
                has_update
        }
  
        /// operation. It will double-check that nothing *else* is also blocking the same channel from
        /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
        fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 -              let mut errors = Vec::new();
                loop {
                        let per_peer_state = self.per_peer_state.read().unwrap();
                        if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
                                                if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
                                                        log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
                                                                channel_funding_outpoint.to_channel_id());
 -                                                      if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
 -                                                              peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
 -                                                      {
 -                                                              errors.push((e, counterparty_node_id));
 -                                                      }
 +                                                      handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
 +                                                              peer_state_lck, peer_state, per_peer_state, chan);
                                                        if further_update_exists {
                                                                // If there are more `ChannelMonitorUpdate`s to process, restart at the
                                                                // top of the loop.
                        }
                        break;
                }
 -              for (err, counterparty_node_id) in errors {
 -                      let res = Err::<(), _>(err);
 -                      let _ = handle_error!(self, res, counterparty_node_id);
 -              }
        }
  
        fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
@@@ -10105,7 -10190,7 +10105,7 @@@ mod tests 
                        TEST_FINAL_CLTV, false), 100_000);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
                let payment_preimage = PaymentPreimage([42; 32]);
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                        RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
                );
                let route = find_route(
                        &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
-                       None, nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
                let payment_id_2 = PaymentId([45; 32]);
                nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
  
                let test_preimage = PaymentPreimage([42; 32]);
                let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
                let route = find_route(
                        &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
-                       nodes[0].logger, &scorer, &(), &random_seed_bytes
+                       nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
                ).unwrap();
  
                let test_preimage = PaymentPreimage([42; 32]);
index 5a1a21e2999863b9f6af7b75da6c467b4228e7f3,a48c78f5217b84ae30d123658203fa3bea69b897..82fa06f5c80b8a5494b9b546d4e5ccb0d4287999
@@@ -422,10 -422,6 +422,10 @@@ pub struct Node<'chan_man, 'node_cfg: '
                &'chan_mon_cfg test_utils::TestLogger,
        >,
  }
 +#[cfg(feature = "std")]
 +impl<'a, 'b, 'c> std::panic::UnwindSafe for Node<'a, 'b, 'c> {}
 +#[cfg(feature = "std")]
 +impl<'a, 'b, 'c> std::panic::RefUnwindSafe for Node<'a, 'b, 'c> {}
  impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
        pub fn best_block_hash(&self) -> BlockHash {
                self.blocks.lock().unwrap().last().unwrap().0.block_hash()
@@@ -582,7 -578,7 +582,7 @@@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 
                        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
                        let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
                        for deserialized_monitor in deserialized_monitors.drain(..) {
 -                              if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != ChannelMonitorUpdateStatus::Completed {
 +                              if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
                                        panic!();
                                }
                        }
@@@ -981,7 -977,7 +981,7 @@@ pub fn _reload_node<'a, 'b, 'c>(node: &
  
        for monitor in monitors_read.drain(..) {
                assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
 -                      ChannelMonitorUpdateStatus::Completed);
 +                      Ok(ChannelMonitorUpdateStatus::Completed));
                check_added_monitors!(node, 1);
        }
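
In the test utilities above, `watch_channel` now returns a `Result` wrapping the persistence status, so assertions compare against `Ok(ChannelMonitorUpdateStatus::Completed)`; per the channelmanager.rs hunks earlier, the error case corresponds to a duplicated funding outpoint. A small self-contained sketch of the same pattern, with stand-in types since the exact error type is not shown in this diff:

    #[derive(Debug, PartialEq)]
    enum MonitorUpdateStatus { Completed, InProgress }

    // Stand-in for the new watch_channel shape: Ok(status) on success, Err(()) when the
    // funding outpoint is already being watched (the duplicate-outpoint case).
    fn watch_channel_stub(duplicate_funding_outpoint: bool) -> Result<MonitorUpdateStatus, ()> {
        if duplicate_funding_outpoint { Err(()) } else { Ok(MonitorUpdateStatus::Completed) }
    }

    fn main() {
        // Test-style assertion mirroring the updated helpers: the status is now Ok-wrapped.
        assert_eq!(watch_channel_stub(false), Ok(MonitorUpdateStatus::Completed));
        // A duplicated funding outpoint surfaces as Err rather than as a failed status variant.
        assert!(watch_channel_stub(true).is_err());
    }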
  
@@@ -1858,7 -1854,7 +1858,7 @@@ pub fn get_route(send_node: &Node, rout
        router::get_route(
                &send_node.node.get_our_node_id(), route_params, &send_node.network_graph.read_only(),
                Some(&send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-               send_node.logger, &scorer, &(), &random_seed_bytes
+               send_node.logger, &scorer, &Default::default(), &random_seed_bytes
        )
  }
  
@@@ -2510,7 -2506,7 +2510,7 @@@ pub fn route_over_limit<'a, 'b, 'c>(ori
        let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
        let random_seed_bytes = keys_manager.get_secure_random_bytes();
        let route = router::get_route(&origin_node.node.get_our_node_id(), &route_params, &network_graph,
-               None, origin_node.logger, &scorer, &(), &random_seed_bytes).unwrap();
+               None, origin_node.logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        assert_eq!(route.paths.len(), 1);
        assert_eq!(route.paths[0].hops.len(), expected_route.len());
        for (node, hop) in expected_route.iter().zip(route.paths[0].hops.iter()) {
index fa766f0ab552abf624d8a25cc2903fcf9e6952ff,00df878752f4ecbc257f72190ebf4df88723e988..1066362a4c4c101791a0c12a051e36c7986c2741
@@@ -2408,7 -2408,6 +2408,7 @@@ fn channel_monitor_network_test() 
                }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
 +              check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
  
                mine_transaction(&nodes[4], &node_txn[0]);
                check_preimage_claim(&nodes[4], &node_txn);
        assert_eq!(nodes[4].node.list_channels().len(), 0);
  
        assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
 -              ChannelMonitorUpdateStatus::Completed);
 -      check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[4].node.get_our_node_id()], 100000);
 -      check_closed_event!(nodes[4], 1, ClosureReason::CommitmentTxConfirmed, [nodes[3].node.get_our_node_id()], 100000);
 +              Ok(ChannelMonitorUpdateStatus::Completed));
 +      check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
  }
  
  #[test]
@@@ -5660,7 -5660,7 +5660,7 @@@ fn do_htlc_claim_local_commitment_only(
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 -      check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
  }
  
  fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
        check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
 -      check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 +      check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
  }
  
  fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
                check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
 -              check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
 +              check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
        }
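
The assertion changes in this hunk swap `ClosureReason::CommitmentTxConfirmed` for `ClosureReason::HolderForceClosed`. A standalone sketch of the distinction as inferred from these assertions (an assumption about intent, not LDK source): when the node broadcasts its own commitment to claim or time out an HTLC, the closure is attributed to the holder's force-close rather than to observing a confirmed commitment transaction.

#[derive(Debug, PartialEq)]
enum Reason { HolderForceClosed, CommitmentTxConfirmed }

fn closure_reason(holder_broadcast_own_commitment: bool) -> Reason {
    if holder_broadcast_own_commitment { Reason::HolderForceClosed } else { Reason::CommitmentTxConfirmed }
}

fn main() {
    // The HTLC-claim/timeout paths exercised above broadcast the holder's own commitment.
    assert_eq!(closure_reason(true), Reason::HolderForceClosed);
}
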
@@@ -7231,7 -7231,7 +7231,7 @@@ fn test_check_htlc_underpaying() 
                TEST_FINAL_CLTV).with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
        let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
-               None, nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
        let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, our_payment_hash,
@@@ -7489,12 -7489,12 +7489,12 @@@ fn test_bump_penalty_txn_on_revoked_htl
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
        let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50).with_bolt11_features(nodes[0].node.invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
        let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes).unwrap();
+               nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
        send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
  
        let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@@ -8453,7 -8453,7 +8453,7 @@@ fn test_update_err_monitor_lockdown() 
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 -              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 +              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
                let mut node_0_peer_state_lock;
                if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
                        if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 -                              assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
 +                              assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
                                assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                        } else { assert!(false); }
                } else {
@@@ -8526,7 -8526,7 +8526,7 @@@ fn test_concurrent_monitor_claim() 
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 -              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 +              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
                        new_monitor
                };
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
 -              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
 +              assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
                watchtower
        };
        watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
                if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
                        if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                                // Watchtower Alice should already have seen the block and reject the update
 -                              assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
 +                              assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
                                assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                                assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                        } else { assert!(false); }
        let height = HTLC_TIMEOUT_BROADCAST + 1;
        connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
        check_closed_broadcast(&nodes[0], 1, true);
 -      check_closed_event!(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
 +      check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
                [nodes[1].node.get_our_node_id()], 100000);
        watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
        check_added_monitors(&nodes[0], 1);
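
In the two watchtower tests above, the stale `update_channel` call is now expected to report `ChannelMonitorUpdateStatus::InProgress` rather than a permanent failure, while the node's own monitor still reports `Completed`. A standalone sketch of that narrowed outcome space, with stand-in types and a simplified trigger condition rather than LDK's actual API:

#[derive(Debug, PartialEq)]
enum UpdateStatus { Completed, InProgress }

// A watchtower that has already acted on a newer block defers the stale update
// (to be resolved out-of-band) instead of reporting it as permanently failed.
fn update_channel(already_saw_later_block: bool) -> UpdateStatus {
    if already_saw_later_block { UpdateStatus::InProgress } else { UpdateStatus::Completed }
}

fn main() {
    assert_eq!(update_channel(true), UpdateStatus::InProgress);
    assert_eq!(update_channel(false), UpdateStatus::Completed);
}
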
index e5af136656f8ab8328e47f796bb6e8c29139f5ac,7a0d5c5ef95694b799218abd83b42af10bdba63e..19f55f4f2d30ea64b62db1bca27263e8ff70f186
@@@ -267,7 -267,7 +267,7 @@@ fn do_test_keysend_payments(public_node
        let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
        let route = find_route(
                &payer_pubkey, &route_params, &network_graph, first_hops,
-               nodes[0].logger, &scorer, &(), &random_seed_bytes
+               nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
        ).unwrap();
  
        {
@@@ -320,7 -320,7 +320,7 @@@ fn test_mpp_keysend() 
        let scorer = test_utils::TestScorer::new();
        let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
        let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger,
-               &scorer, &(), &random_seed_bytes).unwrap();
+               &scorer, &Default::default(), &random_seed_bytes).unwrap();
  
        let payment_preimage = PaymentPreimage([42; 32]);
        let payment_secret = PaymentSecret(payment_preimage.0);
@@@ -1106,7 -1106,7 +1106,7 @@@ fn get_ldk_payment_preimage() 
        let route = get_route( &nodes[0].node.get_our_node_id(), &route_params,
                &nodes[0].network_graph.read_only(),
                Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger,
-               &scorer, &(), &random_seed_bytes).unwrap();
+               &scorer, &Default::default(), &random_seed_bytes).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
                RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
        check_added_monitors!(nodes[0], 1);
@@@ -1750,9 -1750,9 +1750,9 @@@ fn do_test_intercepted_payment(test: In
                ]).unwrap()
                .with_bolt11_features(nodes[2].node.invoice_features()).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat,);
-       let route = get_route( &nodes[0].node.get_our_node_id(), &route_params,
-               &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &(),
-               &random_seed_bytes,).unwrap();
+       let route = get_route(&nodes[0].node.get_our_node_id(), &route_params,
+               &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &Default::default(),
+               &random_seed_bytes).unwrap();
  
        let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
        nodes[0].node.send_payment_with_route(&route, payment_hash,
@@@ -2258,14 -2258,12 +2258,14 @@@ fn auto_retry_partial_failure() 
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
 +      // Open three channels: the first has plenty of liquidity, while the second and third have
 +      // ~no available liquidity, causing any outbound payments routed over them to fail immediately.
        let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      let chan_3_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 +      let chan_2_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
 +      let chan_3_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
  
        // Marshall data to send the payment
 -      let amt_msat = 20_000;
 +      let amt_msat = 10_000_000;
        let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
        #[cfg(feature = "std")]
        let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
                .with_bolt11_features(invoice_features).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
  
 -      // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
 -      // second (for the initial send path2 over chan_2) fails.
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 -      // Ensure third monitor update (for the retry1's path1 over chan_1) succeeds, but the fourth (for
 -      // the retry1's path2 over chan_3) fails, and monitor updates succeed after that.
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 -
        // Configure the initial send, retry1 and retry2's paths.
        let send_route = Route {
                paths: vec![
        // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
 -      let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(closed_chan_events.len(), 4);
 -      match closed_chan_events[0] {
 -              Event::ChannelClosed { .. } => {},
 -              _ => panic!("Unexpected event"),
 -      }
 -      match closed_chan_events[1] {
 +      let payment_failed_events = nodes[0].node.get_and_clear_pending_events();
 +      assert_eq!(payment_failed_events.len(), 2);
 +      match payment_failed_events[0] {
                Event::PaymentPathFailed { .. } => {},
                _ => panic!("Unexpected event"),
        }
 -      match closed_chan_events[2] {
 -              Event::ChannelClosed { .. } => {},
 -              _ => panic!("Unexpected event"),
 -      }
 -      match closed_chan_events[3] {
 +      match payment_failed_events[1] {
                Event::PaymentPathFailed { .. } => {},
                _ => panic!("Unexpected event"),
        }
  
        // Pass the first part of the payment along the path.
 -      check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
 +      check_added_monitors!(nodes[0], 1); // only one HTLC actually made it out
        let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
  
 -      // First message is the first update_add, remaining messages are broadcasting channel updates and
 -      // errors for the permfailed channels
 -      assert_eq!(msg_events.len(), 5);
 +      // Only one HTLC/channel update actually made it out
 +      assert_eq!(msg_events.len(), 1);
        let mut payment_event = SendEvent::from_event(msg_events.remove(0));
  
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
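
A back-of-the-envelope check of the channel sizing introduced in this hunk (assuming a roughly 1% counterparty-selected channel reserve, which is an assumption here): a 1,000,000 sat channel with 989,000,000 msat pushed to the counterparty at open cannot carry the 10,000,000 msat payment, so the HTLCs routed over `chan_2` and `chan_3` fail immediately, as the comment above notes.

fn main() {
    let capacity_msat: u64 = 1_000_000 * 1_000; // 1_000_000 sat channel
    let pushed_msat: u64 = 989_000_000;          // pushed to the counterparty at open
    let reserve_msat: u64 = capacity_msat / 100; // assumed ~1% channel reserve
    let amt_msat: u64 = 10_000_000;              // payment amount used by the test

    // 1_000_000_000 - 989_000_000 - 10_000_000 = 1_000_000 msat spendable,
    // well under the 10_000_000 msat payment, before even counting fees.
    let spendable_msat = capacity_msat - pushed_msat - reserve_msat;
    assert!(spendable_msat < amt_msat);
}
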
@@@ -2461,13 -2478,12 +2461,13 @@@ fn auto_retry_zero_attempts_send_error(
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
 -      create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 -      create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
 +      // Open a single channel that does not have sufficient liquidity for the payment we want to
 +      // send.
 +      let chan_id  = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id;
  
        // Marshall data to send the payment
 -      let amt_msat = 20_000;
 -      let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
 +      let amt_msat = 10_000_000;
 +      let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None);
        #[cfg(feature = "std")]
        let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
        #[cfg(not(feature = "std"))]
                .with_bolt11_features(invoice_features).unwrap();
        let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
  
 -      chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
 +      // Override the route search to return a route, rather than failing at the route-finding step.
 +      let send_route = Route {
 +              paths: vec![
 +                      Path { hops: vec![RouteHop {
 +                              pubkey: nodes[1].node.get_our_node_id(),
 +                              node_features: nodes[1].node.node_features(),
 +                              short_channel_id: chan_id,
 +                              channel_features: nodes[1].node.channel_features(),
 +                              fee_msat: amt_msat,
 +                              cltv_expiry_delta: 100,
 +                              maybe_announced_channel: true,
 +                      }], blinded_tail: None },
 +              ],
 +              route_params: Some(route_params.clone()),
 +      };
 +      nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
 +
        nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
                PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
 -      assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
 +      assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        let events = nodes[0].node.get_and_clear_pending_events();
 -      assert_eq!(events.len(), 3);
 -      if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
 -      if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
 -      if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
 -      check_added_monitors!(nodes[0], 2);
 +      assert_eq!(events.len(), 2);
 +      if let Event::PaymentPathFailed { .. } = events[0] { } else { panic!(); }
 +      if let Event::PaymentFailed { .. } = events[1] { } else { panic!(); }
 +      check_added_monitors!(nodes[0], 0);
  }
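
With the route injected via `expect_find_route` above, the send reaches the HTLC stage and fails there; since the payment was started with `Retry::Attempts(0)`, the single path failure immediately becomes a payment failure with no monitors added. A standalone sketch of that zero-attempts behaviour (stand-in logic, not LDK's payment state machine):

#[derive(Debug, PartialEq)]
enum Outcome { PathFailedThenPaymentFailed, Retried }

fn on_path_failure(retries_remaining: u32) -> Outcome {
    if retries_remaining == 0 { Outcome::PathFailedThenPaymentFailed } else { Outcome::Retried }
}

fn main() {
    // `Retry::Attempts(0)` corresponds to no remaining retries.
    assert_eq!(on_path_failure(0), Outcome::PathFailedThenPaymentFailed);
    assert_eq!(on_path_failure(3), Outcome::Retried);
}
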
  
  #[test]