X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=10d2a69d20fc2c1d321ef5d45d0497fc8e08a147;hb=30b9d9fbeaa62537beb8d3ea0b2866703d0d7c92;hp=720f6e42992db450fb02baf5b25c7b9227e9850c;hpb=6ffd02237fc06f86a4a639eea0f1f470dc8f7d59;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 720f6e42..10d2a69d 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -493,6 +493,18 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	is_connected: bool,
 }
 
+impl <Signer: ChannelSigner> PeerState<Signer> {
+	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
+	/// If true is passed for `require_disconnected`, the function will return false if we haven't
+	/// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
+	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
+		if require_disconnected && self.is_connected {
+			return false
+		}
+		self.channel_by_id.len() == 0
+	}
+}
+
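The new `ok_to_remove` helper centralizes the "disconnected and channel-less" check that was previously duplicated at two call sites. A standalone sketch of the predicate (simplified stand-in types, not part of this patch; the real `PeerState` also carries message queues and feature state):

    use std::collections::HashMap;

    // Simplified stand-in for the real PeerState: `channel_by_id` maps channel
    // IDs to channel state, `is_connected` tracks the current transport link.
    struct PeerState {
        channel_by_id: HashMap<[u8; 32], ()>,
        is_connected: bool,
    }

    impl PeerState {
        // A peer may be removed once it has no channels; if `require_disconnected`
        // is set, it additionally must not be currently connected.
        fn ok_to_remove(&self, require_disconnected: bool) -> bool {
            if require_disconnected && self.is_connected {
                return false;
            }
            self.channel_by_id.is_empty()
        }
    }

    fn main() {
        let peer = PeerState { channel_by_id: HashMap::new(), is_connected: true };
        assert!(!peer.ok_to_remove(true));  // still connected
        assert!(peer.ok_to_remove(false));  // no channels; connection ignored
    }
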
@@ -1178,9 +1190,9 @@ pub enum RecentPaymentDetails {
 		/// made before LDK version 0.0.104.
 		payment_hash: Option<PaymentHash>,
 	},
-	/// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it
-	/// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also
-	/// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted.
+	/// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
+	/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
+	/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
 	Abandoned {
 		/// Hash of the payment that we have given up trying to send.
 		payment_hash: PaymentHash,
@@ -1246,26 +1258,6 @@ macro_rules! handle_error {
 					let mut peer_state = peer_state_mutex.lock().unwrap();
 					peer_state.pending_msg_events.append(&mut msg_events);
 				}
-				#[cfg(any(feature = "_test_utils", test))]
-				{
-					if per_peer_state.get(&$counterparty_node_id).is_none() {
-						// This shouldn't occur in tests unless an unknown counterparty_node_id
-						// has been passed to our message handling functions.
-						let expected_error_str = format!("Can't find a peer matching the passed counterparty node_id {}", $counterparty_node_id);
-						match err.action {
-							msgs::ErrorAction::SendErrorMessage {
-								msg: msgs::ErrorMessage { ref channel_id, ref data }
-							}
-							=> {
-								assert_eq!(*data, expected_error_str);
-								if let Some((err_channel_id, _user_channel_id)) = chan_id {
-									debug_assert_eq!(*channel_id, err_channel_id);
-								}
-							}
-							_ => debug_assert!(false, "Unexpected event"),
-						}
-					}
-				}
 			}
 
 			// Return error in case higher-API need one
@@ -1636,14 +1628,13 @@ where
 	}
 
 	fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
-		let mut res = Vec::new();
 		// Allocate our best estimate of the number of channels we have in the `res`
 		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
 		// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
 		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
 		// unlikely as the `short_to_chan_info` map often contains 2 entries for
 		// the same channel.
-		res.reserve(self.short_to_chan_info.read().unwrap().len());
+		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
 		{
 			let best_block_height = self.best_block.read().unwrap().height();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -1727,7 +1718,7 @@ where
 	///
 	/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
 	/// result of a crash. If such a payment exists, is not listed here, and an
-	/// [`Event::PaymentSent`] has not been received, you may consider retrying the payment.
+	/// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
 	///
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
@@ -1774,7 +1765,7 @@ where
 		let per_peer_state = self.per_peer_state.read().unwrap();
 
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
@@ -1893,7 +1884,7 @@ where
 	-> Result<PublicKey, APIError> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(peer_node_id)
-			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
+			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
 		let mut chan = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
@@ -2484,8 +2475,8 @@ where
 	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
 	/// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
 	/// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
-	/// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
-	/// [`PaymentId`].
+	/// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
+	/// second payment with the same [`PaymentId`].
 	///
 	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
 	/// tracking of payments, including state to indicate once a payment has completed. Because you
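The doc change above stresses that LDK will not police `PaymentId` reuse once a payment has resolved; the caller must track attempted ids itself. A minimal caller-side sketch (the store and its persistence are hypothetical, not LDK API):

    use std::collections::HashSet;

    // Hypothetical deterministic payment-id type mirroring LDK's `PaymentId([u8; 32])`.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct PaymentId([u8; 32]);

    // Tracks every PaymentId ever attempted; a real application would persist
    // this set across restarts.
    struct PaymentStore {
        attempted: HashSet<PaymentId>,
    }

    impl PaymentStore {
        // Returns true only the first time a given id is seen, so the caller
        // sends at most once even if the send path is re-entered after a crash
        // or an Event::PaymentFailed.
        fn should_send(&mut self, id: PaymentId) -> bool {
            self.attempted.insert(id)
        }
    }
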
@@ -2530,6 +2521,7 @@ where
 	/// [`Route`], we assume the invoice had the basic_mpp feature set.
 	///
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
+	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
 	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
 	pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
@@ -2546,7 +2538,7 @@ where
 		let best_block_height = self.best_block.read().unwrap().height();
 		self.pending_outbound_payments
 			.send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
-				&self.router, self.list_usable_channels(), self.compute_inflight_htlcs(),
+				&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
 				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
 				|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
 				self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
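Passing `|| self.compute_inflight_htlcs()` instead of an eagerly computed value lets the callee skip the potentially expensive in-flight HTLC snapshot on paths that never route. A standalone sketch of the pattern (names hypothetical):

    // The callee takes `impl FnOnce() -> Vec<u64>` so the snapshot is only
    // built on the code path that actually consumes it.
    fn maybe_route(needs_route: bool, inflight_htlcs: impl FnOnce() -> Vec<u64>) {
        if needs_route {
            let htlcs = inflight_htlcs(); // computed lazily, at most once
            println!("routing around {} in-flight HTLCs", htlcs.len());
        }
        // When `needs_route` is false the closure is never called and the
        // expensive walk over all channels is skipped entirely.
    }

    fn main() {
        maybe_route(true, || (0..3).collect());
        maybe_route(false, || unreachable!("not evaluated"));
    }
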
@@ -2567,48 +2559,25 @@ where
 	}
 
-	/// Retries a payment along the given [`Route`].
+	/// Signals that no further retries for the given payment should occur. Useful if you have a
+	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
+	/// retries are exhausted.
 	///
-	/// Errors returned are a superset of those returned from [`send_payment`], so see
-	/// [`send_payment`] documentation for more details on errors. This method will also error if the
-	/// retry amount puts the payment more than 10% over the payment's total amount, if the payment
-	/// for the given `payment_id` cannot be found (likely due to timeout or success), or if
-	/// further retries have been disabled with [`abandon_payment`].
-	///
-	/// [`send_payment`]: [`ChannelManager::send_payment`]
-	/// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
-	pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
-		let best_block_height = self.best_block.read().unwrap().height();
-		self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
-			|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
-			self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
-	}
-
-	/// Signals that no further retries for the given payment will occur.
-	///
-	/// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
-	/// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
-	/// generated as soon as there are no remaining pending HTLCs for this payment.
+	/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
+	/// as there are no remaining pending HTLCs for this payment.
 	///
 	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
 	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
 	/// determine the ultimate status of a payment.
 	///
 	/// If an [`Event::PaymentFailed`] event is generated and we restart without this
-	/// [`ChannelManager`] having been persisted, the payment may still be in the pending state
-	/// upon restart. This allows further calls to [`retry_payment`] (and requiring a second call
-	/// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
-	/// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+	/// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
 	///
-	/// [`abandon_payment`]: Self::abandon_payment
-	/// [`retry_payment`]: Self::retry_payment
 	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn abandon_payment(&self, payment_id: PaymentId) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
-			self.pending_events.lock().unwrap().push(payment_failed_ev);
-		}
+		self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
 	}
 
 	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
@@ -2637,11 +2606,16 @@ where
 	/// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
 	/// based on `route_params` and retry failed payment paths based on `retry_strategy`.
+	///
+	/// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
+	/// payments.
+	///
+	/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
 	pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, PaymentSendFailure> {
 		let best_block_height = self.best_block.read().unwrap().height();
 		self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
 			retry_strategy, route_params, &self.router, self.list_usable_channels(),
-			self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
 			&self.logger,
 			|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
 			self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
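With `retry_payment` removed, the caller's job after `abandon_payment` is purely event-driven. A sketch of the consuming side, using simplified stand-ins for LDK's `Event` variants referenced above:

    // Simplified stand-ins for the two events that settle a payment's fate.
    enum PaymentEvent {
        PaymentSent { payment_id: [u8; 32] },
        PaymentFailed { payment_id: [u8; 32] },
    }

    // Abandoning merely stops future retries; it cannot un-send HTLCs already
    // in flight, so either outcome remains possible until one of these fires.
    fn handle_event(ev: PaymentEvent) {
        match ev {
            PaymentEvent::PaymentSent { payment_id } =>
                println!("payment {:02x?} succeeded despite abandon", &payment_id[..4]),
            PaymentEvent::PaymentFailed { payment_id } =>
                println!("payment {:02x?} fully failed; safe to retry with a new id", &payment_id[..4]),
        }
    }
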
@@ -2671,7 +2645,7 @@ where
 	) -> Result<(), APIError> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::ChannelUnavailable{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
@@ -2840,7 +2814,7 @@ where
 		);
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		for channel_id in channel_ids {
@@ -2895,7 +2869,7 @@ where
 		let next_hop_scid = {
 			let peer_state_lock = self.per_peer_state.read().unwrap();
 			let peer_state_mutex = peer_state_lock.get(&next_node_id)
-				.ok_or_else(|| APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
+				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.get(next_hop_channel_id) {
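The call sites above switch the unknown-peer error from `APIError::APIMisuseError` to `APIError::ChannelUnavailable`, so callers matching on the variant should treat the latter as the possibly transient "no such peer/channel" case. A sketch of caller-side handling (the enum is a pared-down stand-in for LDK's `APIError`):

    // Pared-down stand-in for LDK's APIError with just the two variants at issue.
    enum ApiError {
        APIMisuseError { err: String },
        ChannelUnavailable { err: String },
    }

    fn on_update_result(res: Result<(), ApiError>) {
        match res {
            Ok(()) => {}
            // Unknown peer / unusable channel: possibly transient, e.g. the
            // peer disconnected; retry once it reconnects.
            Err(ApiError::ChannelUnavailable { err }) => eprintln!("channel unavailable: {err}"),
            // Misuse is a local programming error and should be surfaced loudly.
            Err(ApiError::APIMisuseError { err }) => panic!("API misuse: {err}"),
        }
    }
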
@@ -3376,7 +3350,8 @@ where
 		let best_block_height = self.best_block.read().unwrap().height();
 		self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
-			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
+			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+			&self.pending_events, &self.logger,
 			|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
 			self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
@@ -3421,38 +3396,6 @@ where
 		true
 	}
 
-	/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
-	/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
-	/// to that peer is later closed while still being disconnected (i.e. force closed), we
-	/// therefore need to remove the peer from `peer_state` separately.
-	/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
-	/// instead remove such peers awaiting removal through this function, which is called on a
-	/// timer through `timer_tick_occurred`, passing the peers disconnected peers with no channels,
-	/// to limit the negative effects on parallelism as much as possible.
-	///
-	/// Must be called without the `per_peer_state` lock acquired.
-	fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
-		if pending_peers_awaiting_removal.len() > 0 {
-			let mut per_peer_state = self.per_peer_state.write().unwrap();
-			for counterparty_node_id in pending_peers_awaiting_removal {
-				match per_peer_state.entry(counterparty_node_id) {
-					hash_map::Entry::Occupied(entry) => {
-						// Remove the entry if the peer is still disconnected and we still
-						// have no channels to the peer.
-						let remove_entry = {
-							let peer_state = entry.get().lock().unwrap();
-							!peer_state.is_connected && peer_state.channel_by_id.len() == 0
-						};
-						if remove_entry {
-							entry.remove_entry();
-						}
-					},
-					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
-				}
-			}
-		}
-	}
-
 	#[cfg(any(test, feature = "_test_utils"))]
 	/// Process background events, for functional testing
 	pub fn test_process_background_events(&self) {
@@ -3526,7 +3469,7 @@ where
 		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
 		let mut timed_out_mpp_htlcs = Vec::new();
-		let mut pending_peers_awaiting_removal = HashSet::new();
+		let mut pending_peers_awaiting_removal = Vec::new();
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3574,13 +3517,38 @@ where
 					true
 				});
 
-				let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
-				if peer_should_be_removed {
-					pending_peers_awaiting_removal.insert(counterparty_node_id);
+				if peer_state.ok_to_remove(true) {
+					pending_peers_awaiting_removal.push(counterparty_node_id);
+				}
+			}
+		}
+
+		// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+		// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+		// to that peer are later closed while still being disconnected (i.e. force closed),
+		// we therefore need to remove the peer from `peer_state` separately.
+		// To avoid having to take the `per_peer_state` `write` lock once the channels are
+		// closed, we instead remove such peers awaiting removal here on a timer, to limit the
+		// negative effects on parallelism as much as possible.
+		if pending_peers_awaiting_removal.len() > 0 {
+			let mut per_peer_state = self.per_peer_state.write().unwrap();
+			for counterparty_node_id in pending_peers_awaiting_removal {
+				match per_peer_state.entry(counterparty_node_id) {
+					hash_map::Entry::Occupied(entry) => {
+						// Remove the entry if the peer is still disconnected and we still
+						// have no channels to the peer.
+						let remove_entry = {
+							let peer_state = entry.get().lock().unwrap();
+							peer_state.ok_to_remove(true)
+						};
+						if remove_entry {
+							entry.remove_entry();
+						}
+					},
+					hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
+				}
 			}
 		}
-		self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
 
 		self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
 			if htlcs.is_empty() {
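The pattern above, collecting candidates under the cheap read lock and then re-checking each one under the write lock before removing, avoids holding the write lock during the scan, while the re-check guards against a peer reconnecting between the two phases. A standalone sketch:

    use std::collections::HashMap;
    use std::sync::{Mutex, RwLock};

    struct Peer { connected: bool }

    fn prune(peers: &RwLock<HashMap<u32, Mutex<Peer>>>) {
        // Phase 1: scan under the shared read lock, remembering candidates only.
        let candidates: Vec<u32> = peers.read().unwrap().iter()
            .filter(|(_, p)| !p.lock().unwrap().connected)
            .map(|(id, _)| *id)
            .collect();
        if candidates.is_empty() { return; }
        // Phase 2: take the exclusive write lock briefly, re-checking each
        // entry because it may have changed (e.g. reconnected) since phase 1.
        let mut map = peers.write().unwrap();
        for id in candidates {
            if map.get(&id).map_or(false, |p| !p.lock().unwrap().connected) {
                map.remove(&id);
            }
        }
    }
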
@@ -4317,7 +4285,7 @@ where
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
@@ -4367,7 +4335,10 @@ where
 		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))?;
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
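The repeated `ok_or_else(|| { debug_assert!(false); ... })` shape added throughout the message handlers below converts "unknown peer" into a protocol error in release builds while crashing debug builds, where reaching this path indicates a local bug (the peer handler should never dispatch a message for an unconnected peer). A generic sketch of the idiom:

    use std::collections::HashMap;

    #[derive(Debug)]
    struct ProtocolError(String);

    fn lookup_peer<'a>(peers: &'a HashMap<u32, String>, id: u32) -> Result<&'a String, ProtocolError> {
        peers.get(&id).ok_or_else(|| {
            // In debug builds this aborts immediately: the dispatcher should
            // have guaranteed the peer exists. In release builds we degrade to
            // an error sent back to the counterparty instead of crashing.
            debug_assert!(false);
            ProtocolError(format!("Can't find a peer matching the passed counterparty node_id {}", id))
        })
    }
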
node_id {}", counterparty_node_id), msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -4745,7 +4737,10 @@ where let (htlc_source, forwarded_htlc_value) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?; + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -4762,7 +4757,10 @@ where fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?; + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -4777,7 +4775,10 @@ where fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?; + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -4796,7 +4797,10 @@ where fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?; + .ok_or_else(|| { + debug_assert!(false); + MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id) + })?; let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { @@ -4937,7 +4941,10 @@ where let res = loop { let per_peer_state = self.per_peer_state.read().unwrap(); 
@@ -4937,7 +4941,10 @@ where
 		let res = loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-				.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?;
+				.ok_or_else(|| {
+					debug_assert!(false);
+					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+				})?;
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -5000,7 +5007,10 @@ where
 	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?;
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -5015,7 +5025,10 @@ where
 	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?;
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -5031,7 +5044,7 @@ where
 					), chan),
 					// Note that announcement_signatures fails if the channel cannot be announced,
 					// so get_channel_update_for_broadcast will never fail by the time we get here.
-					update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
+					update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
 				});
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5086,7 +5099,10 @@ where
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
-			.ok_or_else(|| MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id))?;
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+			})?;
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
@@ -5403,7 +5419,8 @@ where
 	/// [`PaymentHash`] and [`PaymentPreimage`] for you.
 	///
 	/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
-	/// will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be
+	/// will have the [`PaymentClaimable::purpose`] be [`PaymentPurpose::InvoicePayment`] with
+	/// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
 	/// passed directly to [`claim_funds`].
 	///
 	/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
@@ -5423,7 +5440,9 @@ where
 	///
 	/// [`claim_funds`]: Self::claim_funds
 	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
-	/// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
+	/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
+	/// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
+	/// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
 	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
 	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
 		min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
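The doc change above reflects that the preimage now lives inside the event's `purpose` rather than directly on the event. A sketch of the receiving side, using simplified stand-ins for LDK's `PaymentPurpose` (the real variant carries `payment_preimage` and `payment_secret`):

    // Simplified stand-ins for LDK's PaymentPurpose shapes.
    enum PaymentPurpose {
        InvoicePayment { payment_preimage: Option<[u8; 32]>, payment_secret: [u8; 32] },
        SpontaneousPayment([u8; 32]),
    }

    fn preimage_to_claim(purpose: &PaymentPurpose) -> Option<[u8; 32]> {
        match purpose {
            // For LDK-generated invoices the preimage is reconstructed for us
            // and should be handed straight back to claim_funds.
            PaymentPurpose::InvoicePayment { payment_preimage, .. } => *payment_preimage,
            // Keysend: the sender chose the preimage and it is always present.
            PaymentPurpose::SpontaneousPayment(preimage) => Some(*preimage),
        }
    }
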
@@ -5953,7 +5972,7 @@ where
 						msg: announcement,
 						// Note that announcement_signatures fails if the channel cannot be announced,
 						// so get_channel_update_for_broadcast will never fail by the time we get here.
-						update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
+						update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
 					});
 				}
 			}
@@ -6233,9 +6252,8 @@ where
 	fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
-		let mut no_channels_remain = true;
 		let mut per_peer_state = self.per_peer_state.write().unwrap();
-		{
+		let remove_peer = {
 			log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
 				log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
 			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
@@ -6248,8 +6266,6 @@ where
 						update_maps_on_chan_removal!(self, chan);
 						self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
 						return false;
-					} else {
-						no_channels_remain = false;
 					}
 					true
 				});
@@ -6269,6 +6285,7 @@ where
 					&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
 					&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
 					&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+					&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
 					&events::MessageSendEvent::SendChannelUpdate { .. } => false,
 					&events::MessageSendEvent::HandleError { .. } => false,
 					&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
@@ -6279,9 +6296,10 @@ where
 				});
 				debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
 				peer_state.is_connected = false;
-			}
-		}
-		if no_channels_remain {
+				peer_state.ok_to_remove(true)
+			} else { true }
+		};
+		if remove_peer {
 			per_peer_state.remove(counterparty_node_id);
 		}
 		mem::drop(per_peer_state);
@@ -6875,6 +6893,7 @@ where
 			best_block.block_hash().write(writer)?;
 		}
 
+		let mut serializable_peer_count: u64 = 0;
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let mut unfunded_channels = 0;
@@ -6882,6 +6901,9 @@ where
 			for (_, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
+				if !peer_state.ok_to_remove(false) {
+					serializable_peer_count += 1;
+				}
 				number_of_channels += peer_state.channel_by_id.len();
 				for (_, channel) in peer_state.channel_by_id.iter() {
 					if !channel.is_funding_initiated() {
@@ -6932,11 +6954,18 @@ where
 			htlc_purposes.push(purpose);
 		}
 
-		(per_peer_state.len() as u64).write(writer)?;
+		(serializable_peer_count).write(writer)?;
 		for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
-			peer_pubkey.write(writer)?;
-			let peer_state = peer_state_mutex.lock().unwrap();
-			peer_state.latest_features.write(writer)?;
+			let peer_state_lock = peer_state_mutex.lock().unwrap();
+			let peer_state = &*peer_state_lock;
+			// Peers which we have no channels to should be dropped once disconnected. As we
+			// disconnect all peers when shutting down and serializing the ChannelManager, we
+			// consider all peers as disconnected here. There's therefore no need to write
+			// peers with no channels.
+			if !peer_state.ok_to_remove(false) {
+				peer_pubkey.write(writer)?;
+				peer_state.latest_features.write(writer)?;
+			}
 		}
 
 		let events = self.pending_events.lock().unwrap();
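Length-prefixed serialization like this forces a two-pass shape: first count the entries that qualify, then write the count followed by only those entries, using the same predicate both times so the passes cannot disagree. A standalone sketch:

    use std::io::{self, Write};

    struct Peer { channels: usize, features: u8 }

    // Writes `count || (id, features)` for each peer with channels, mirroring
    // the skip-empty-peers logic above; one predicate drives both passes.
    fn write_peers(w: &mut impl Write, peers: &[(u8, Peer)]) -> io::Result<()> {
        let serializable = |p: &Peer| p.channels != 0;
        let count = peers.iter().filter(|(_, p)| serializable(p)).count() as u64;
        w.write_all(&count.to_be_bytes())?;
        for (id, peer) in peers {
            if serializable(peer) {
                w.write_all(&[*id, peer.features])?;
            }
        }
        Ok(())
    }
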
@@ -7723,19 +7752,14 @@ where
 mod tests {
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
-	use bitcoin::hashes::hex::FromHex;
 	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-	use bitcoin::secp256k1::ecdsa::Signature;
-	use bitcoin::secp256k1::ffi::Signature as FFISignature;
-	use bitcoin::blockdata::script::Script;
-	use bitcoin::Txid;
 	use core::time::Duration;
 	use core::sync::atomic::Ordering;
 	use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
 	use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::msgs;
-	use crate::ln::msgs::{ChannelMessageHandler, OptionalField};
+	use crate::ln::msgs::ChannelMessageHandler;
 	use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
 	use crate::util::errors::APIError;
 	use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
@@ -7952,7 +7976,7 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1);
-		let scorer = test_utils::TestScorer::with_penalty(0);
+		let scorer = test_utils::TestScorer::new();
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 
 		// To start (1), send a regular payment but don't claim it.
@@ -8058,7 +8082,7 @@ mod tests {
 		};
 		let network_graph = nodes[0].network_graph.clone();
 		let first_hops = nodes[0].node.list_usable_channels();
-		let scorer = test_utils::TestScorer::with_penalty(0);
+		let scorer = test_utils::TestScorer::new();
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
@@ -8101,7 +8125,7 @@ mod tests {
 		};
 		let network_graph = nodes[0].network_graph.clone();
 		let first_hops = nodes[0].node.list_usable_channels();
-		let scorer = test_utils::TestScorer::with_penalty(0);
+		let scorer = test_utils::TestScorer::new();
 		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 		let route = find_route(
 			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
@@ -8334,19 +8358,22 @@ mod tests {
 
 	fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
 		let expected_message = format!("Not connected to node: {}", expected_public_key);
-		check_api_misuse_error_message(expected_message, res_err)
+		check_api_error_message(expected_message, res_err)
 	}
 
 	fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
 		let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
-		check_api_misuse_error_message(expected_message, res_err)
+		check_api_error_message(expected_message, res_err)
 	}
 
-	fn check_api_misuse_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
+	fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
 		match res_err {
 			Err(APIError::APIMisuseError { err }) => {
 				assert_eq!(err, expected_err_message);
 			},
+			Err(APIError::ChannelUnavailable { err }) => {
+				assert_eq!(err, expected_err_message);
+			},
 			Ok(_) => panic!("Unexpected Ok"),
 			Err(_) => panic!("Unexpected Error"),
 		}
 	}
@@ -8354,140 +8381,23 @@ mod tests {
 
 	#[test]
 	fn test_api_calls_with_unkown_counterparty_node() {
-		// Tests that our API functions and message handlers that expects a `counterparty_node_id`
-		// as input, behaves as expected if the `counterparty_node_id` is an unkown peer in the
+		// Tests that our API functions that expect a `counterparty_node_id` as input behave as
+		// expected if the `counterparty_node_id` is an unknown peer in the
 		// `ChannelManager::per_peer_state` map.
 		let chanmon_cfg = create_chanmon_cfgs(2);
 		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
 		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
 		let nodes = create_network(2, &node_cfg, &node_chanmgr);
 
-		// Boilerplate code to produce `open_channel` and `accept_channel` msgs more densly than
-		// creating dummy ones.
-		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
-		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
-		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
-		let accept_channel_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
-
 		// Dummy values
 		let channel_id = [4; 32];
-		let signature = Signature::from(unsafe { FFISignature::new() });
 		let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
 		let intercept_id = InterceptId([0; 32]);
 
-		// Dummy msgs
-		let funding_created_msg = msgs::FundingCreated {
-			temporary_channel_id: open_channel_msg.temporary_channel_id,
-			funding_txid: Txid::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(),
-			funding_output_index: 0,
-			signature: signature,
-		};
-
-		let funding_signed_msg = msgs::FundingSigned {
-			channel_id: channel_id,
-			signature: signature,
-		};
-
-		let channel_ready_msg = msgs::ChannelReady {
-			channel_id: channel_id,
-			next_per_commitment_point: unkown_public_key,
-			short_channel_id_alias: None,
-		};
-
-		let announcement_signatures_msg = msgs::AnnouncementSignatures {
-			channel_id: channel_id,
-			short_channel_id: 0,
-			node_signature: signature,
-			bitcoin_signature: signature,
-		};
-
-		let channel_reestablish_msg = msgs::ChannelReestablish {
-			channel_id: channel_id,
-			next_local_commitment_number: 0,
-			next_remote_commitment_number: 0,
-			data_loss_protect: OptionalField::Absent,
-		};
-
-		let closing_signed_msg = msgs::ClosingSigned {
-			channel_id: channel_id,
-			fee_satoshis: 1000,
-			signature: signature,
-			fee_range: None,
-		};
-
-		let shutdown_msg = msgs::Shutdown {
-			channel_id: channel_id,
-			scriptpubkey: Script::new(),
-		};
-
-		let onion_routing_packet = msgs::OnionPacket {
-			version: 255,
-			public_key: Ok(unkown_public_key),
-			hop_data: [1; 20*65],
-			hmac: [2; 32]
-		};
-
-		let update_add_htlc_msg = msgs::UpdateAddHTLC {
-			channel_id: channel_id,
-			htlc_id: 0,
-			amount_msat: 1000000,
-			payment_hash: PaymentHash([1; 32]),
-			cltv_expiry: 821716,
-			onion_routing_packet
-		};
-
-		let commitment_signed_msg = msgs::CommitmentSigned {
-			channel_id: channel_id,
-			signature: signature,
-			htlc_signatures: Vec::new(),
-		};
-
-		let update_fee_msg = msgs::UpdateFee {
-			channel_id: channel_id,
-			feerate_per_kw: 1000,
-		};
-
-		let malformed_update_msg = msgs::UpdateFailMalformedHTLC{
-			channel_id: channel_id,
-			htlc_id: 0,
-			sha256_of_onion: [1; 32],
-			failure_code: 0x8000,
-		};
-
-		let fulfill_update_msg = msgs::UpdateFulfillHTLC{
-			channel_id: channel_id,
-			htlc_id: 0,
-			payment_preimage: PaymentPreimage([1; 32]),
-		};
-
-		let fail_update_msg = msgs::UpdateFailHTLC{
-			channel_id: channel_id,
-			htlc_id: 0,
-			reason: msgs::OnionErrorPacket { data: Vec::new()},
-		};
-
-		let revoke_and_ack_msg = msgs::RevokeAndACK {
-			channel_id: channel_id,
-			per_commitment_secret: [1; 32],
-			next_per_commitment_point: unkown_public_key,
-		};
-
-		// Test the API functions and message handlers.
+		// Test the API functions.
 		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None), unkown_public_key);
 
-		nodes[1].node.handle_open_channel(&unkown_public_key, &open_channel_msg);
-
-		nodes[0].node.handle_accept_channel(&unkown_public_key, &accept_channel_msg);
-
-		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&open_channel_msg.temporary_channel_id, &unkown_public_key, 42), unkown_public_key);
-
-		nodes[1].node.handle_funding_created(&unkown_public_key, &funding_created_msg);
-
-		nodes[0].node.handle_funding_signed(&unkown_public_key, &funding_signed_msg);
-
-		nodes[0].node.handle_channel_ready(&unkown_public_key, &channel_ready_msg);
-
-		nodes[1].node.handle_announcement_signatures(&unkown_public_key, &announcement_signatures_msg);
+		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
 
 		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
 
@@ -8498,26 +8408,6 @@ mod tests {
 		check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
 
 		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
-
-		nodes[0].node.handle_shutdown(&unkown_public_key, &shutdown_msg);
-
-		nodes[1].node.handle_closing_signed(&unkown_public_key, &closing_signed_msg);
-
-		nodes[0].node.handle_channel_reestablish(&unkown_public_key, &channel_reestablish_msg);
-
-		nodes[1].node.handle_update_add_htlc(&unkown_public_key, &update_add_htlc_msg);
-
-		nodes[1].node.handle_commitment_signed(&unkown_public_key, &commitment_signed_msg);
-
-		nodes[1].node.handle_update_fail_malformed_htlc(&unkown_public_key, &malformed_update_msg);
-
-		nodes[1].node.handle_update_fail_htlc(&unkown_public_key, &fail_update_msg);
-
-		nodes[1].node.handle_update_fulfill_htlc(&unkown_public_key, &fulfill_update_msg);
-
-		nodes[1].node.handle_revoke_and_ack(&unkown_public_key, &revoke_and_ack_msg);
-
-		nodes[1].node.handle_update_fee(&unkown_public_key, &update_fee_msg);
 	}
 
 	#[cfg(anchors)]
@@ -8605,7 +8495,8 @@ pub mod bench {
 		let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
 		let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
-		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
+		let scorer = Mutex::new(test_utils::TestScorer::new());
+		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)), &scorer);
 
 		let mut config: UserConfig = Default::default();
 		config.channel_handshake_config.minimum_depth = 1;
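The bench now threads a shared scorer into the router. Wrapping it in a `Mutex` lets route-finding (which reads scores) and payment-result handling (which updates them) share one instance across call sites. A generic sketch of the shape (types hypothetical, not LDK's `Score` trait):

    use std::sync::Mutex;

    // Hypothetical channel scorer: higher penalty means a less attractive hop.
    struct Scorer { penalty_msat: u64 }

    struct Router<'a> { scorer: &'a Mutex<Scorer> }

    impl<'a> Router<'a> {
        // Route-finding locks the scorer only for the duration of the query...
        fn channel_penalty(&self) -> u64 {
            self.scorer.lock().unwrap().penalty_msat
        }
        // ...while failure handling locks it to feed results back in.
        fn payment_path_failed(&self) {
            self.scorer.lock().unwrap().penalty_msat += 1_000;
        }
    }
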
@@ -8696,7 +8587,7 @@ pub mod bench {
 				let usable_channels = $node_a.list_usable_channels();
 				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
 					.with_features($node_b.invoice_features());
-				let scorer = test_utils::TestScorer::with_penalty(0);
+				let scorer = test_utils::TestScorer::new();
 				let seed = [3u8; 32];
 				let keys_manager = KeysManager::new(&seed, 42, 42);
 				let random_seed_bytes = keys_manager.get_secure_random_bytes();
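For reference, the diff leans on `PaymentParameters` constructors in two places: `from_node_id` above, and the newly documented `for_keysend` for spontaneous payments. A sketch of assembling `route_params` for a keysend retry, using stand-in types whose fields follow this era's `RouteParameters` shape (an assumption, not a definitive API reference):

    // Stand-ins mirroring the shapes referenced in the diff; not LDK itself.
    struct PaymentParameters { payee: [u8; 33] }
    impl PaymentParameters {
        // for_keysend sets up payee parameters for spontaneous payments.
        fn for_keysend(payee: [u8; 33]) -> Self { PaymentParameters { payee } }
    }
    struct RouteParameters {
        payment_params: PaymentParameters,
        final_value_msat: u64,
        final_cltv_expiry_delta: u32,
    }

    fn keysend_route_params(payee: [u8; 33], amt_msat: u64) -> RouteParameters {
        RouteParameters {
            payment_params: PaymentParameters::for_keysend(payee),
            final_value_msat: amt_msat,
            final_cltv_expiry_delta: 40, // a typical final CLTV delta in tests
        }
    }

These `route_params` would then be handed to `send_spontaneous_payment_with_retry` together with a `Retry` strategy, per the documentation added earlier in this diff.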