X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=fefa002ee7c932f101d2ea04af1af4bc9b3789c1;hb=29ed2864f32e4748f67eb64a5cc74c8a6defe4b4;hp=db326a2ecb5880ce5f5fe0636f6799c47d9d38ab;hpb=5b53670172d7415c953ca7e7fbffcd2d63f058f8;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index db326a2e..fefa002e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -45,7 +45,7 @@ use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, No #[cfg(any(feature = "_test_utils", test))] use crate::ln::features::InvoiceFeatures; use crate::routing::gossip::NetworkGraph; -use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, Router}; +use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router}; use crate::routing::scoring::ProbabilisticScorer; use crate::ln::msgs; use crate::ln::onion_utils; @@ -53,9 +53,9 @@ use crate::ln::onion_utils::HTLCFailReason; use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT}; #[cfg(test)] use crate::ln::outbound_payment; -use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment}; +use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment}; use crate::ln::wire::Encode; -use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, Sign, SignerProvider}; +use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner}; use crate::util::config::{UserConfig, ChannelConfig}; use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::util::events; @@ -76,7 +76,7 @@ use core::time::Duration; use core::ops::Deref; // Re-export this for use in the public API. -pub use crate::ln::outbound_payment::PaymentSendFailure; +pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry}; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -245,6 +245,10 @@ pub(crate) enum HTLCSource { first_hop_htlc_msat: u64, payment_id: PaymentId, payment_secret: Option, + /// Note that this is now "deprecated" - we write it for forwards (and read it for + /// backwards) compatibility reasons, but prefer to use the data in the + /// [`super::outbound_payment`] module, which stores per-payment data once instead of in + /// each HTLC. payment_params: Option, }, } @@ -289,6 +293,25 @@ struct ReceiveError { msg: &'static str, } +/// This enum is used to specify which error data to send to peers when failing back an HTLC +/// using [`ChannelManager::fail_htlc_backwards_with_reason`]. +/// +/// For more info on failure codes, see . +#[derive(Clone, Copy)] +pub enum FailureCode { + /// We had a temporary error processing the payment. Useful if no other error codes fit + /// and you want to indicate that the payer may want to retry. + TemporaryNodeFailure = 0x2000 | 2, + /// We have a required feature which was not in this onion. For example, you may require + /// some additional metadata that was not provided with this payment. 
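 /// In BOLT 4 terms this value is `PERM|NODE|3`: the 0x4000 (`PERM`) bit marks the failure as
 /// permanent and the 0x2000 (`NODE`) bit marks it as a node-level rather than a channel-level
 /// failure, just as [`FailureCode::TemporaryNodeFailure`] above is `NODE|2`.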
+ RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3, + /// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of + /// the HTLC is too close to the current block height for safe handling. + /// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is + /// equivalent to calling [`ChannelManager::fail_htlc_backwards`]. + IncorrectOrUnknownPaymentDetails = 0x4000 | 15, +} + type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>); /// Error type returned across the peer_state mutex boundary. When an Err is generated for a @@ -388,7 +411,7 @@ impl MsgHandleErrInternal { /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited). /// This provides some limited amount of privacy. Ideally this would range from somewhere like one /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. -const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; +pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100; /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should /// be sent in the order they appear in the return value, however sometimes the order needs to be @@ -452,7 +475,7 @@ pub(crate) enum MonitorUpdateCompletionAction { } /// State we hold per-peer. -pub(super) struct PeerState { +pub(super) struct PeerState { /// `temporary_channel_id` or `channel_id` -> `channel`. /// /// Holds all channels where the peer is the counterparty. Once a channel has been assigned a @@ -464,6 +487,10 @@ pub(super) struct PeerState { /// Messages to send to the peer - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, + /// The peer is currently connected (i.e. we've seen a + /// [`ChannelMessageHandler::peer_connected`] and no corresponding + /// [`ChannelMessageHandler::peer_disconnected`]. + is_connected: bool, } /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is @@ -1131,6 +1158,36 @@ impl ChannelDetails { } } +/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments. +/// These include payments that have yet to find a successful path, or have unresolved HTLCs. +#[derive(Debug, PartialEq)] +pub enum RecentPaymentDetails { + /// When a payment is still being sent and awaiting successful delivery. + Pending { + /// Hash of the payment that is currently being sent but has yet to be fulfilled or + /// abandoned. + payment_hash: PaymentHash, + /// Total amount (in msat, excluding fees) across all paths for this payment, + /// not just the amount currently inflight. + total_msat: u64, + }, + /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have + /// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the + /// payment is removed from tracking. + Fulfilled { + /// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`] + /// made before LDK version 0.0.104. + payment_hash: Option, + }, + /// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it + /// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also + /// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted. 
+ Abandoned { + /// Hash of the payment that we have given up trying to send. + payment_hash: PaymentHash, + }, +} + /// Route hints used in constructing invoices for [phantom node payents]. /// /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager @@ -1668,6 +1725,34 @@ where self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Returns in an undefined order recent payments that -- if not fulfilled -- have yet to find a + /// successful path, or have unresolved HTLCs. + /// + /// This can be useful for payments that may have been prepared, but ultimately not sent, as a + /// result of a crash. If such a payment exists, is not listed here, and an + /// [`Event::PaymentSent`] has not been received, you may consider retrying the payment. + /// + /// [`Event::PaymentSent`]: events::Event::PaymentSent + pub fn list_recent_payments(&self) -> Vec { + self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter() + .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment { + PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => { + Some(RecentPaymentDetails::Pending { + payment_hash: *payment_hash, + total_msat: *total_msat, + }) + }, + PendingOutboundPayment::Abandoned { payment_hash, .. } => { + Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash }) + }, + PendingOutboundPayment::Fulfilled { payment_hash, .. } => { + Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash }) + }, + PendingOutboundPayment::Legacy { .. } => None + }) + .collect() + } + /// Helper function that issues the channel close events fn issue_channel_close_events(&self, channel: &Channel<::Signer>, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); @@ -2392,9 +2477,14 @@ where /// Sends a payment along a given route. /// - /// Value parameters are provided via the last hop in route, see documentation for RouteHop + /// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`] /// fields for more info. /// + /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via + /// [`PeerManager::process_events`]). + /// + /// # Avoiding Duplicate Payments + /// /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this /// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an @@ -2407,12 +2497,16 @@ where /// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the /// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes. /// - /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via - /// [`PeerManager::process_events`]). + /// Additionally, in the scenario where we begin the process of sending a payment, but crash + /// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're + /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See + /// [`ChannelManager::list_recent_payments`] for more information. + /// + /// # Possible Error States on [`PaymentSendFailure`] /// /// Each path may have a different return value, and PaymentSendValue may return a Vec with /// each entry matching the corresponding-index entry in the route paths, see - /// PaymentSendFailure for more info. 
+ /// [`PaymentSendFailure`] for more info. /// /// In general, a path may raise: /// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee, @@ -2427,18 +2521,21 @@ where /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a /// different route unless you intend to pay twice! /// - /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate - /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For - /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route - /// must not contain multiple paths as multi-path payments require a recipient-provided - /// payment_secret. + /// # A caution on `payment_secret` + /// + /// `payment_secret` is unrelated to `payment_hash` (or [`PaymentPreimage`]) and exists to + /// authenticate the sender to the recipient and prevent payment-probing (deanonymization) + /// attacks. For newer nodes, it will be provided to you in the invoice. If you do not have one, + /// the [`Route`] must not contain multiple paths as multi-path payments require a + /// recipient-provided `payment_secret`. /// - /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature - /// bit set (either as required or as available). If multiple paths are present in the Route, - /// we assume the invoice had the basic_mpp feature set. + /// If a `payment_secret` *is* provided, we assume that the invoice had the payment_secret + /// feature bit set (either as required or as available). If multiple paths are present in the + /// [`Route`], we assume the invoice had the basic_mpp feature set. /// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events + /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); self.pending_outbound_payments @@ -2447,6 +2544,18 @@ where self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } + /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on + /// `route_params` and retry failed payment paths based on `retry_strategy`. 
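 ///
 /// A rough usage sketch, intended as illustration only (it assumes an LDK build around this
 /// revision, with `channel_manager`, `payee_pubkey`, `payment_hash` and `payment_secret`
 /// already in scope):
 ///
 /// ```ignore
 /// let route_params = RouteParameters {
 ///     payment_params: PaymentParameters::from_node_id(payee_pubkey, 144),
 ///     final_value_msat: 10_000,
 ///     final_cltv_expiry_delta: 144,
 /// };
 /// channel_manager.send_payment_with_retry(payment_hash, &Some(payment_secret),
 ///     PaymentId(payment_hash.0), route_params, Retry::Attempts(3))?;
 /// ```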
+ pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> { + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments + .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params, + &self.router, self.list_usable_channels(), self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + } + #[cfg(test)] fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); @@ -2458,7 +2567,7 @@ where #[cfg(test)] pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option, payment_id: PaymentId, route: &Route) -> Result, PaymentSendFailure> { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.entropy_source, best_block_height) + self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, None, &self.entropy_source, best_block_height) } @@ -2523,7 +2632,21 @@ where /// [`send_payment`]: Self::send_payment pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option, payment_id: PaymentId) -> Result { let best_block_height = self.best_block.read().unwrap().height(); - self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, best_block_height, + self.pending_outbound_payments.send_spontaneous_payment_with_route( + route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer, + best_block_height, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) + } + + /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route + /// based on `route_params` and retry failed payment paths based on `retry_strategy`. 
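 ///
 /// A rough usage sketch, intended as illustration only (it assumes `channel_manager`,
 /// `payee_pubkey` and a locally generated `payment_preimage` are already in scope):
 ///
 /// ```ignore
 /// let route_params = RouteParameters {
 ///     payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 ///     final_value_msat: 10_000,
 ///     final_cltv_expiry_delta: 40,
 /// };
 /// let payment_hash = channel_manager.send_spontaneous_payment_with_retry(
 ///     Some(payment_preimage), PaymentId(payment_preimage.0), route_params,
 ///     Retry::Attempts(3))?;
 /// ```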
+ pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result { + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id, + retry_strategy, route_params, &self.router, self.list_usable_channels(), + self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, + &self.logger, |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)) } @@ -3261,6 +3384,12 @@ where } } + let best_block_height = self.best_block.read().unwrap().height(); + self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(), + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv| + self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv)); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination); } @@ -3483,21 +3612,40 @@ where /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on /// startup during which time claims that were in-progress at shutdown may be replayed. pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { + self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails); + } + + /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the + /// reason for the failure. + /// + /// See [`FailureCode`] for valid failure codes. + pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash); if let Some((_, mut sources)) = removed_source { for htlc in sources.drain(..) { - let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); - htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } } } + /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`]. 
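 ///
 /// For [`FailureCode::IncorrectOrUnknownPaymentDetails`] the error data is the HTLC amount
 /// (8 bytes, big-endian) followed by the current best block height (4 bytes, big-endian),
 /// matching the `incorrect_or_unknown_payment_details` payload in BOLT 4; the other two
 /// variants carry no error data.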
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason { + match failure_code { + FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16), + FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16), + FailureCode::IncorrectOrUnknownPaymentDetails => { + let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); + htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes()); + HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data) + } + } + } + /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code /// that we want to return and a channel. /// @@ -3701,7 +3849,7 @@ where let mut expected_amt_msat = None; let mut valid_mpp = true; let mut errs = Vec::new(); - let mut per_peer_state = Some(self.per_peer_state.read().unwrap()); + let per_peer_state = self.per_peer_state.read().unwrap(); for htlc in sources.iter() { let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) { Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()), @@ -3711,13 +3859,13 @@ where } }; - if let None = per_peer_state.as_ref().unwrap().get(&counterparty_node_id) { + let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); + if let None = peer_state_mutex_opt { valid_mpp = false; break; } - let peer_state_mutex = per_peer_state.as_ref().unwrap().get(&counterparty_node_id).unwrap(); - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; if let None = peer_state.channel_by_id.get(&chan_id) { @@ -3746,14 +3894,13 @@ where claimable_amt_msat += htlc.value; } + mem::drop(per_peer_state); if sources.is_empty() || expected_amt_msat.is_none() { - mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!"); return; } if claimable_amt_msat != expected_amt_msat.unwrap() { - mem::drop(per_peer_state); self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash); log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.", expected_amt_msat.unwrap(), claimable_amt_msat); @@ -3761,8 +3908,7 @@ where } if valid_mpp { for htlc in sources.drain(..) { - if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); } - if let Err((pk, err)) = self.claim_funds_from_hop(per_peer_state.take().unwrap(), + if let Err((pk, err)) = self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })) { @@ -3774,7 +3920,6 @@ where } } } - mem::drop(per_peer_state); if !valid_mpp { for htlc in sources.drain(..) { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); @@ -3795,11 +3940,11 @@ where } fn claim_funds_from_hop) -> Option>(&self, - per_peer_state_lock: RwLockReadGuard::Signer>>>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) -> Result<(), (PublicKey, MsgHandleErrInternal)> { //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
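 // The added lines below take the `per_peer_state` read lock inside this helper, rather than
 // having callers pass a `RwLockReadGuard` in (note the `per_peer_state_lock` parameter removed
 // above); the guard is then explicitly dropped before
 // `handle_monitor_update_completion_actions` runs.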
+ let per_peer_state = self.per_peer_state.read().unwrap(); let chan_id = prev_hop.outpoint.to_channel_id(); let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { @@ -3807,83 +3952,76 @@ where None => None }; - let (found_channel, mut peer_state_opt) = if counterparty_node_id_opt.is_some() && per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).is_some() { - let peer_mutex = per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).unwrap(); - let peer_state = peer_mutex.lock().unwrap(); - let found_channel = peer_state.channel_by_id.contains_key(&chan_id); - (found_channel, Some(peer_state)) - } else { (false, None) }; - - if found_channel { - let peer_state = &mut *peer_state_opt.as_mut().unwrap(); - if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) { - let counterparty_node_id = chan.get().get_counterparty_node_id(); - match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { - Ok(msgs_monitor_option) => { - if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { - match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { - ChannelMonitorUpdateStatus::Completed => {}, - e => { - log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, - "Failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, e); - let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); - mem::drop(peer_state_opt); - mem::drop(per_peer_state_lock); - self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); - return Err((counterparty_node_id, err)); - } - } - if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", - log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: counterparty_node_id, - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); - } - mem::drop(peer_state_opt); - mem::drop(per_peer_state_lock); - self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); - Ok(()) - } else { - Ok(()) - } - }, - Err((e, monitor_update)) => { + let mut peer_state_opt = counterparty_node_id_opt.as_ref().map( + |counterparty_node_id| per_peer_state.get(counterparty_node_id).map( + |peer_mutex| peer_mutex.lock().unwrap() + ) + ).unwrap_or(None); + + if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id)) + { + let counterparty_node_id = chan.get().get_counterparty_node_id(); + match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { + Ok(msgs_monitor_option) => { + if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { ChannelMonitorUpdateStatus::Completed => {}, e => { - // TODO: This needs to be handled somehow - if 
we receive a monitor update - // with a preimage we *must* somehow manage to propagate it to the upstream - // channel, or we must have an ability to receive the same update and try - // again on restart. - log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info }, - "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", + log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e); - }, + let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(); + mem::drop(peer_state_opt); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); + return Err((counterparty_node_id, err)); + } } - let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id); - if drop { - chan.remove_entry(); + if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); + peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: counterparty_node_id, + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); } mem::drop(peer_state_opt); - mem::drop(per_peer_state_lock); - self.handle_monitor_update_completion_actions(completion_action(None)); - Err((counterparty_node_id, res)) - }, - } - } else { - // We've held the peer_state mutex since finding the channel and setting - // found_channel to true, so the channel can't have been dropped. - unreachable!() + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat))); + Ok(()) + } else { + Ok(()) + } + }, + Err((e, monitor_update)) => { + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same update and try + // again on restart. + log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", + payment_preimage, e); + }, + } + let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id); + if drop { + chan.remove_entry(); + } + mem::drop(peer_state_opt); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(completion_action(None)); + Err((counterparty_node_id, res)) + }, } } else { let preimage_update = ChannelMonitorUpdate { @@ -3904,7 +4042,7 @@ where payment_preimage, update_res); } mem::drop(peer_state_opt); - mem::drop(per_peer_state_lock); + mem::drop(per_peer_state); // Note that we do process the completion action here. 
This totally could be a // duplicate claim, but we have no way of knowing without interrogating the // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are @@ -3926,7 +4064,7 @@ where }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(self.per_peer_state.read().unwrap(), hop_data, payment_preimage, + let res = self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat| { if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { @@ -5464,6 +5602,12 @@ where events.into_inner() } + #[cfg(feature = "_test_utils")] + pub fn push_pending_event(&self, event: events::Event) { + let mut events = self.pending_events.lock().unwrap(); + events.push(event); + } + #[cfg(test)] pub fn pop_pending_event(&self) -> Option { let mut events = self.pending_events.lock().unwrap(); @@ -5562,9 +5706,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if peer_state.pending_msg_events.len() > 0 { - let mut peer_pending_events = Vec::new(); - mem::swap(&mut peer_pending_events, &mut peer_state.pending_msg_events); - pending_events.append(&mut peer_pending_events); + pending_events.append(&mut peer_state.pending_msg_events); } } @@ -5730,8 +5872,8 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; for chan in peer_state.channel_by_id.values() { - if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) { - res.push((funding_txo.txid, block_hash)); + if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) { + res.push((funding_txo.txid, Some(block_hash))); } } } @@ -6139,6 +6281,8 @@ where &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false, } }); + debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect"); + peer_state.is_connected = false; } } if no_channels_remain { @@ -6169,10 +6313,14 @@ where channel_by_id: HashMap::new(), latest_features: init_msg.features.clone(), pending_msg_events: Vec::new(), + is_connected: true, })); }, hash_map::Entry::Occupied(e) => { - e.get().lock().unwrap().latest_features = init_msg.features.clone(); + let mut peer_state = e.get().lock().unwrap(); + peer_state.latest_features = init_msg.features.clone(); + debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice"); + peer_state.is_connected = true; }, } } @@ -7191,6 +7339,7 @@ where channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()), latest_features: Readable::read(reader)?, pending_msg_events: Vec::new(), + is_connected: false, }; per_peer_state.insert(peer_pubkey, Mutex::new(peer_state)); } @@ -7301,9 +7450,13 @@ where hash_map::Entry::Vacant(entry) => { let path_fee = path.get_path_fees(); entry.insert(PendingOutboundPayment::Retryable { + retry_strategy: None, + attempts: PaymentAttempts::new(), + payment_params: None, session_privs: [session_priv_bytes].iter().map(|a| *a).collect(), payment_hash: htlc.payment_hash, payment_secret, + keysend_preimage: None, // only used for retries, and we'll never retry on startup pending_amt_msat: path_amt, pending_fee_msat: Some(path_fee), total_msat: path_amt, @@ -7812,7 +7965,7 @@ mod tests { // Next, attempt a keysend payment and make sure it fails. 
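 // (`PaymentParameters::for_keysend` now takes the final CLTV expiry delta as a second
 // argument, hence the `TEST_FINAL_CLTV` added to the call below.)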
let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id()), + payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV), final_value_msat: 100_000, final_cltv_expiry_delta: TEST_FINAL_CLTV, }; @@ -7900,12 +8053,10 @@ mod tests { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; @@ -7945,12 +8096,10 @@ mod tests { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap(); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap(); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), final_value_msat: 10_000, final_cltv_expiry_delta: 40, }; @@ -8515,7 +8664,7 @@ pub mod bench { macro_rules! send_payment { ($node_a: expr, $node_b: expr) => { let usable_channels = $node_a.list_usable_channels(); - let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id()) + let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV) .with_features($node_b.invoice_features()); let scorer = test_utils::TestScorer::with_penalty(0); let seed = [3u8; 32];
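
Taken together, the payment-retry and failure-code additions in this diff could be exercised roughly as follows. This is an illustrative sketch only, assuming an LDK build around this revision: `channel_manager`, `payee_pubkey`, `payment_hash`, `payment_secret` and `inbound_payment_hash` are assumed to already be in scope, and node setup and error handling are omitted.

use lightning::ln::channelmanager::{FailureCode, PaymentId, RecentPaymentDetails, Retry};
use lightning::routing::router::{PaymentParameters, RouteParameters};

// Queue an outbound payment; LDK finds routes itself and retries failed paths up to 3 times.
let route_params = RouteParameters {
    payment_params: PaymentParameters::from_node_id(payee_pubkey, 144),
    final_value_msat: 10_000,
    final_cltv_expiry_delta: 144,
};
channel_manager
    .send_payment_with_retry(payment_hash, &Some(payment_secret),
        PaymentId(payment_hash.0), route_params, Retry::Attempts(3))
    .expect("failed to queue payment");

// Later (e.g. after a restart), inspect which outbound payments are still pending,
// fulfilled, or abandoned.
for payment in channel_manager.list_recent_payments() {
    match payment {
        RecentPaymentDetails::Pending { payment_hash, total_msat } =>
            println!("still sending {} msat for {:?}", total_msat, payment_hash),
        RecentPaymentDetails::Fulfilled { payment_hash } =>
            println!("fulfilled {:?}", payment_hash),
        RecentPaymentDetails::Abandoned { payment_hash } =>
            println!("gave up on {:?}", payment_hash),
    }
}

// On the receive side, fail back a claimable payment with an explicit BOLT 4 failure code
// instead of the default incorrect_or_unknown_payment_details.
channel_manager.fail_htlc_backwards_with_reason(&inbound_payment_hash,
    &FailureCode::TemporaryNodeFailure);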