X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=d47fbe0babd17d98d1674c58377d7bf3942bbeb8;hb=d0b8f455fe86d3e55231d35d35dd96693abe2049;hp=a1148b692b102b15754386bfc620a116f4d5fb8d;hpb=5e9e68ad689bc0a4278a31bf5241c90648dcbe1d;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index a1148b69..d47fbe0b 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -70,7 +70,7 @@ use crate::prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
-use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
@@ -1190,9 +1190,9 @@ pub enum RecentPaymentDetails {
 		/// made before LDK version 0.0.104.
 		payment_hash: Option<PaymentHash>,
 	},
-	/// After a payment is explicitly abandoned by calling [`ChannelManager::abandon_payment`], it
-	/// is marked as abandoned until an [`Event::PaymentFailed`] is generated. A payment could also
-	/// be marked as abandoned if pathfinding fails repeatedly or retries have been exhausted.
+	/// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
+	/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
+	/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
 	Abandoned {
 		/// Hash of the payment that we have given up trying to send.
 		payment_hash: PaymentHash,
@@ -1218,13 +1218,10 @@ macro_rules! handle_error {
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				#[cfg(any(feature = "_test_utils", test))]
-				{
-					// In testing, ensure there are no deadlocks where the lock is already held upon
-					// entering the macro.
-					debug_assert!($self.pending_events.try_lock().is_ok());
-					debug_assert!($self.per_peer_state.try_write().is_ok());
-				}
+				// In testing, ensure there are no deadlocks where the lock is already held upon
+				// entering the macro.
+				debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+				debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 
 				let mut msg_events = Vec::with_capacity(2);
 
@@ -1718,7 +1715,7 @@ where
 	///
 	/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
 	/// result of a crash. If such a payment exists, is not listed here, and an
-	/// [`Event::PaymentSent`] has not been received, you may consider retrying the payment.
+	/// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
 	///
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
@@ -2475,8 +2472,8 @@ where
 	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
 	/// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
 	/// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
-	/// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
-	/// [`PaymentId`].
+	/// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
+	/// second payment with the same [`PaymentId`].
 	///
 	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
 	/// tracking of payments, including state to indicate once a payment has completed. Because you
@@ -2521,6 +2518,7 @@ where
 	/// [`Route`], we assume the invoice had the basic_mpp feature set.
 	///
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
+	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
 	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
 	pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
@@ -2558,48 +2556,25 @@ where
 
 	}
 
-	/// Retries a payment along the given [`Route`].
-	///
-	/// Errors returned are a superset of those returned from [`send_payment`], so see
-	/// [`send_payment`] documentation for more details on errors. This method will also error if the
-	/// retry amount puts the payment more than 10% over the payment's total amount, if the payment
-	/// for the given `payment_id` cannot be found (likely due to timeout or success), or if
-	/// further retries have been disabled with [`abandon_payment`].
-	///
-	/// [`send_payment`]: [`ChannelManager::send_payment`]
-	/// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
-	pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
-		let best_block_height = self.best_block.read().unwrap().height();
-		self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
-			|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
-			self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
-	}
-
-	/// Signals that no further retries for the given payment will occur.
+	/// Signals that no further retries for the given payment should occur. Useful if you have a
+	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
+	/// retries are exhausted.
 	///
-	/// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
-	/// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
-	/// generated as soon as there are no remaining pending HTLCs for this payment.
+	/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
+	/// as there are no remaining pending HTLCs for this payment.
 	///
 	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
 	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
 	/// determine the ultimate status of a payment.
 	///
 	/// If an [`Event::PaymentFailed`] event is generated and we restart without this
-	/// [`ChannelManager`] having been persisted, the payment may still be in the pending state
-	/// upon restart. This allows further calls to [`retry_payment`] (and requiring a second call
-	/// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
-	/// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+	/// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
 	///
-	/// [`abandon_payment`]: Self::abandon_payment
-	/// [`retry_payment`]: Self::retry_payment
 	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn abandon_payment(&self, payment_id: PaymentId) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
-			self.pending_events.lock().unwrap().push(payment_failed_ev);
-		}
+		self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
 	}
 
 	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
@@ -3372,7 +3347,8 @@ where
 
 		let best_block_height = self.best_block.read().unwrap().height();
 		self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
-			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
+			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+			&self.pending_events, &self.logger,
 			|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
 			self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
 
@@ -3743,17 +3719,12 @@ where
 	/// Fails an HTLC backwards to the sender of it to us.
 	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
 	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
-		#[cfg(any(feature = "_test_utils", test))]
-		{
-			// Ensure that the peer state channel storage lock is not held when calling this
-			// function.
-			// This ensures that future code doesn't introduce a lock_order requirement for
-			// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
-			// this function with any `per_peer_state` peer lock aquired would.
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			for (_, peer) in per_peer_state.iter() {
-				debug_assert!(peer.try_lock().is_ok());
-			}
+		// Ensure that no peer state channel storage lock is held when calling this function.
+		// This ensures that future code doesn't introduce a lock-order requirement for
+		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+		// this function with any `per_peer_state` peer lock acquired would.
+		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
 		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
@@ -7723,7 +7694,7 @@ where
 
 			inbound_payment_key: expanded_inbound_key,
 			pending_inbound_payments: Mutex::new(pending_inbound_payments),
-			pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
+			pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()), },
 			pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
 			forward_htlcs: Mutex::new(forward_htlcs),
 
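
A note on the deadlock assertions replaced above: `try_lock().is_ok()` fails whenever *any* thread holds the lock, which is why the old `debug_assert!`s had to hide behind `#[cfg(any(feature = "_test_utils", test))]` and could not tell "held by another thread" apart from "held by us". Only the latter can self-deadlock on re-acquisition, and that is exactly what the new `held_by_thread()` check reports. The sketch below is not LDK's `LockTestExt` implementation (which lives in its `sync` module and also covers `RwLock` and non-test builds); `TrackedMutex`, `TrackedGuard`, and `thread_id` are hypothetical names illustrating the idea:

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::{Mutex, MutexGuard};

    #[derive(Debug, PartialEq)]
    enum LockHeldState { HeldByThread, NotHeldByThread }

    fn thread_id() -> u64 {
        // Hand out a unique u64 per thread (ThreadId::as_u64 is unstable).
        static NEXT: AtomicU64 = AtomicU64::new(1);
        thread_local!(static ID: u64 = NEXT.fetch_add(1, Ordering::Relaxed));
        ID.with(|id| *id)
    }

    struct TrackedMutex<T> {
        inner: Mutex<T>,
        holder: AtomicU64, // thread_id() of the current holder, 0 when unheld
    }

    struct TrackedGuard<'a, T> {
        _guard: MutexGuard<'a, T>,
        holder: &'a AtomicU64,
    }

    impl<T> Drop for TrackedGuard<'_, T> {
        // Clear the holder record before the inner guard releases the mutex.
        fn drop(&mut self) { self.holder.store(0, Ordering::Relaxed); }
    }

    impl<T> TrackedMutex<T> {
        fn new(value: T) -> Self {
            Self { inner: Mutex::new(value), holder: AtomicU64::new(0) }
        }

        fn lock(&self) -> TrackedGuard<'_, T> {
            let guard = self.inner.lock().unwrap();
            self.holder.store(thread_id(), Ordering::Relaxed);
            TrackedGuard { _guard: guard, holder: &self.holder }
        }

        // The key difference from `try_lock().is_ok()`: this reports
        // HeldByThread only for the thread that would actually deadlock
        // by re-acquiring, so it stays meaningful under parallel tests.
        fn held_by_thread(&self) -> LockHeldState {
            if self.holder.load(Ordering::Relaxed) == thread_id() {
                LockHeldState::HeldByThread
            } else {
                LockHeldState::NotHeldByThread
            }
        }
    }

    fn main() {
        let m = TrackedMutex::new(0u32);
        assert_eq!(m.held_by_thread(), LockHeldState::NotHeldByThread);
        let _g = m.lock();
        // This thread now holds the lock, so a check like the
        // debug_assert_ne! in handle_error! would fire here.
        assert_eq!(m.held_by_thread(), LockHeldState::HeldByThread);
    }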
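The expanded `send_payment` documentation says callers should implement their own tracking of payments keyed by `PaymentId`, because LDK only rejects a duplicate id while the payment is still pending; once an `Event::PaymentSent` or `Event::PaymentFailed` resolves it, a resend with the same id is allowed. A minimal sketch of such caller-side state, assuming `PaymentId` is a newtype over 32 bytes; the `PaymentTracker` type and its methods are hypothetical:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    // Hypothetical caller-side state. In production this must be persisted
    // *before* calling `send_payment`, or a crash can still cause a double-send.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum PaymentState { Pending, Succeeded, Failed }

    #[derive(Default)]
    struct PaymentTracker {
        // Keyed by the `PaymentId` bytes handed to `send_payment`.
        payments: HashMap<[u8; 32], PaymentState>,
    }

    impl PaymentTracker {
        /// Returns true only the first time a given id is seen, so the caller
        /// never invokes `send_payment` twice for the same `PaymentId` -- even
        /// after the payment resolves, when LDK itself would permit a resend.
        fn should_send(&mut self, payment_id: [u8; 32]) -> bool {
            match self.payments.entry(payment_id) {
                Entry::Occupied(_) => false,
                Entry::Vacant(e) => { e.insert(PaymentState::Pending); true }
            }
        }

        /// Record the terminal state on `Event::PaymentSent` or
        /// `Event::PaymentFailed`.
        fn mark_resolved(&mut self, payment_id: [u8; 32], success: bool) {
            let state = if success { PaymentState::Succeeded } else { PaymentState::Failed };
            self.payments.insert(payment_id, state);
        }
    }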
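Tying the two doc changes together on the event-handling side: `abandon_payment` only requests abandonment, the terminal signal is still `Event::PaymentFailed` once no HTLCs remain pending, and the payment may still succeed in the meantime. A sketch under the assumption that the variant shapes match this LDK 0.0.113-era API (`lightning::util::events::Event`; paths and fields may differ in other releases), reusing the hypothetical `PaymentTracker` above:

    use lightning::ln::channelmanager::PaymentId;
    use lightning::util::events::Event;

    fn handle_payment_event(tracker: &mut PaymentTracker, event: &Event) {
        match event {
            // Emitted once no HTLCs remain pending, whether the payment was
            // explicitly abandoned or its retries were exhausted.
            Event::PaymentFailed { payment_id, .. } => {
                tracker.mark_resolved(payment_id.0, false);
            }
            // Abandoning does not prevent success, so this arm is still
            // required after calling `abandon_payment`.
            Event::PaymentSent { payment_id: Some(payment_id), .. } => {
                tracker.mark_resolved(payment_id.0, true);
            }
            _ => {}
        }
    }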