X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=3748c5688da81f9f9ac493ac059573b71b79a67b;hb=7a37e2cd23ee1d563ad2b21f115d713b15d9e8a0;hp=7c84df75eb1c6e66b8d94a8f47befde4f6f414e4;hpb=36235c38f16dd99f62d1b3eb458d69211e4fdef6;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7c84df75..3748c568 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -36,7 +36,7 @@ use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, Fee
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::events;
-use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
 // construct one themselves.
 use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
@@ -306,9 +306,9 @@ impl core::hash::Hash for HTLCSource {
 		}
 	}
 }
-#[cfg(not(feature = "grind_signatures"))]
-#[cfg(test)]
 impl HTLCSource {
+	#[cfg(not(feature = "grind_signatures"))]
+	#[cfg(test)]
 	pub fn dummy() -> Self {
 		HTLCSource::OutboundRoute {
 			path: Vec::new(),
@@ -317,6 +317,18 @@ impl HTLCSource {
 			payment_id: PaymentId([2; 32]),
 		}
 	}
+
+	#[cfg(debug_assertions)]
+	/// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
+	/// transaction. Useful to ensure different datastructures match up.
+	pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
+		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
+			*first_hop_htlc_msat == htlc.amount_msat
+		} else {
+			// There's nothing we can check for forwarded HTLCs
+			true
+		}
+	}
 }
 
 struct ReceiveError {
@@ -1342,15 +1354,15 @@ pub struct PhantomRouteHints {
 }
 
 macro_rules! handle_error {
-	($self: ident, $internal: expr, $counterparty_node_id: expr) => {
+	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
+		// In testing, ensure there are no deadlocks where the lock is already held upon
+		// entering the macro.
+		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
+
 		match $internal {
 			Ok(msg) => Ok(msg),
 			Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
-				// In testing, ensure there are no deadlocks where the lock is already held upon
-				// entering the macro.
-				debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
-				debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
-
 				let mut msg_events = Vec::with_capacity(2);
 
 				if let Some((shutdown_res, update_option)) = shutdown_finish {
@@ -1389,7 +1401,7 @@ macro_rules! handle_error {
 				Err(err)
 			},
 		}
-	}
+	} }
 }
 
 macro_rules! update_maps_on_chan_removal {
@@ -2699,7 +2711,7 @@ where
 	/// [`Event::PaymentSent`]: events::Event::PaymentSent
 	pub fn abandon_payment(&self, payment_id: PaymentId) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
+		self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
 	}
 
 	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
@@ -2774,29 +2786,34 @@ where
 
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		let (chan, msg) = {
-			let (res, chan) = {
-				match peer_state.channel_by_id.remove(temporary_channel_id) {
-					Some(mut chan) => {
-						let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
-						(chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
-							.map_err(|e| if let ChannelError::Close(msg) = e {
-								MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
-							} else { unreachable!(); })
-						, chan)
+		let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+			Some(mut chan) => {
+				let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+				let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+					.map_err(|e| if let ChannelError::Close(msg) = e {
+						MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+					} else { unreachable!(); });
+				match funding_res {
+					Ok(funding_msg) => (funding_msg, chan),
+					Err(_) => {
+						mem::drop(peer_state_lock);
+						mem::drop(per_peer_state);
+
+						let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+						return Err(APIError::ChannelUnavailable {
+							err: "Signer refused to sign the initial commitment transaction".to_owned()
+						});
 					},
-					None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
 				}
-			};
-			match handle_error!(self, res, chan.get_counterparty_node_id()) {
-				Ok(funding_msg) => {
-					(chan, funding_msg)
-				},
-				Err(_) => { return Err(APIError::ChannelUnavailable {
-					err: "Signer refused to sign the initial commitment transaction".to_owned()
-				}) },
-			}
+			},
+			None => {
+				return Err(APIError::ChannelUnavailable {
+					err: format!(
+						"Channel with id {} not found for the passed counterparty node_id {}",
+						log_bytes!(*temporary_channel_id), counterparty_node_id),
+				})
+			},
 		};
 
 		peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
@@ -3347,8 +3364,10 @@ where
 						}
 					}
 					let mut total_value = claimable_htlc.sender_intended_value;
+					let mut earliest_expiry = claimable_htlc.cltv_expiry;
 					for htlc in htlcs.iter() {
 						total_value += htlc.sender_intended_value;
+						earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
 						match &htlc.onion_payload {
 							OnionPayload::Invoice { .. } => {
 								if htlc.total_msat != $payment_data.total_msat {
@@ -3381,6 +3400,7 @@ where
 							amount_msat,
 							via_channel_id: Some(prev_channel_id),
 							via_user_channel_id: Some(prev_user_channel_id),
+							claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
 						});
 						payment_claimable_generated = true;
 					} else {
@@ -3434,6 +3454,7 @@ where
 				hash_map::Entry::Vacant(e) => {
 					let amount_msat = claimable_htlc.value;
 					claimable_htlc.total_value_received = Some(amount_msat);
+					let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
 					let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
 					e.insert((purpose.clone(), vec![claimable_htlc]));
 					let prev_channel_id = prev_funding_outpoint.to_channel_id();
@@ -3444,6 +3465,7 @@ where
 						purpose,
 						via_channel_id: Some(prev_channel_id),
 						via_user_channel_id: Some(prev_user_channel_id),
+						claim_deadline,
 					});
 				},
 				hash_map::Entry::Occupied(_) => {
@@ -3919,9 +3941,10 @@ where
 	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
 	/// [`MessageSendEvent`]s needed to claim the payment.
 	///
-	/// Note that calling this method does *not* guarantee that the payment has been claimed. You
-	/// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
-	/// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+	/// This method is guaranteed to ensure the payment has been claimed but only if the current
+	/// height is strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
+	/// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
+	/// successful. It will generally be available in the next [`process_pending_events`] call.
 	///
 	/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
 	/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
@@ -3929,6 +3952,7 @@ where
 	/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
 	///
 	/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+	/// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
 	/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
 	/// [`process_pending_events`]: EventsProvider::process_pending_events
 	/// [`create_inbound_payment`]: Self::create_inbound_payment
@@ -3965,21 +3989,10 @@ where
 		};
 		debug_assert!(!sources.is_empty());
 
-		// If we are claiming an MPP payment, we check that all channels which contain a claimable
-		// HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
-		// we're claiming (or even after we claim, before the commitment update dance completes),
-		// it should be a relatively rare race, and we'd rather not claim HTLCs that require us to
-		// go on-chain (and lose the on-chain fee to do so) than just reject the payment.
-		//
-		// Note that we'll still always get our funds - as long as the generated
-		// `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
-		//
-		// If we find an HTLC which we would need to claim but for which we do not have a
-		// channel, we will fail all parts of the MPP payment. While we could wait and see if
-		// the sender retries the already-failed path(s), it should be a pretty rare case where
-		// we got all the HTLCs and then a channel closed while we were waiting for the user to
-		// provide the preimage, so worrying too much about the optimal handling isn't worth
-		// it.
+		// Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
+		// and when we got here we need to check that the amount we're about to claim matches the
+		// amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
+		// the MPP parts all have the same `total_msat`.
 		let mut claimable_amt_msat = 0;
 		let mut prev_total_msat = None;
 		let mut expected_amt_msat = None;
@@ -3987,28 +4000,6 @@ where
 		let mut errs = Vec::new();
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		for htlc in sources.iter() {
-			let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
-				Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
-				None => {
-					valid_mpp = false;
-					break;
-				}
-			};
-
-			let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-			if peer_state_mutex_opt.is_none() {
-				valid_mpp = false;
-				break;
-			}
-
-			let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-
-			if peer_state.channel_by_id.get(&chan_id).is_none() {
-				valid_mpp = false;
-				break;
-			}
-
 			if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
 				log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
 				debug_assert!(false);
@@ -4088,45 +4079,46 @@ where
 	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
-		let per_peer_state = self.per_peer_state.read().unwrap();
-		let chan_id = prev_hop.outpoint.to_channel_id();
-		let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
-			Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
-			None => None
-		};
+		{
+			let per_peer_state = self.per_peer_state.read().unwrap();
+			let chan_id = prev_hop.outpoint.to_channel_id();
+			let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+				Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+				None => None
+			};
 
-		let peer_state_opt = counterparty_node_id_opt.as_ref().map(
-			|counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
-				|peer_mutex| peer_mutex.lock().unwrap()
-			)
-		).unwrap_or(None);
+			let peer_state_opt = counterparty_node_id_opt.as_ref().map(
+				|counterparty_node_id| per_peer_state.get(counterparty_node_id)
+					.map(|peer_mutex| peer_mutex.lock().unwrap())
+			).unwrap_or(None);
 
-		if peer_state_opt.is_some() {
-			let mut peer_state_lock = peer_state_opt.unwrap();
-			let peer_state = &mut *peer_state_lock;
-			if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
-				let counterparty_node_id = chan.get().get_counterparty_node_id();
-				let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
-				if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
-					if let Some(action) = completion_action(Some(htlc_value_msat)) {
-						log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
-							log_bytes!(chan_id), action);
-						peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-					}
-					let update_id = monitor_update.update_id;
-					let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
-					let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
-						peer_state, per_peer_state, chan);
-					if let Err(e) = res {
-						// TODO: This is a *critical* error - we probably updated the outbound edge
-						// of the HTLC's monitor with a preimage. We should retry this monitor
-						// update over and over again until morale improves.
-						log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-						return Err((counterparty_node_id, e));
+			if peer_state_opt.is_some() {
+				let mut peer_state_lock = peer_state_opt.unwrap();
+				let peer_state = &mut *peer_state_lock;
+				if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+					let counterparty_node_id = chan.get().get_counterparty_node_id();
+					let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+					if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+						if let Some(action) = completion_action(Some(htlc_value_msat)) {
+							log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+								log_bytes!(chan_id), action);
+							peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+						}
+						let update_id = monitor_update.update_id;
+						let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+						let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+							peer_state, per_peer_state, chan);
+						if let Err(e) = res {
+							// TODO: This is a *critical* error - we probably updated the outbound edge
+							// of the HTLC's monitor with a preimage. We should retry this monitor
+							// update over and over again until morale improves.
+							log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+							return Err((counterparty_node_id, e));
+						}
 					}
+					return Ok(());
 				}
-				return Ok(());
 			}
 		}
 		let preimage_update = ChannelMonitorUpdate {
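
Note on consuming the new `claim_deadline` field (not part of the patch): per the updated `claim_funds` documentation above, a claim is only guaranteed to succeed while the current chain height is strictly below the deadline. A minimal, hypothetical sketch of a downstream event handler follows; `channel_manager` and `best_block_height` are assumed to be supplied by the surrounding application, and the `ChannelManager` generics are elided.

	use lightning::events::{Event, PaymentPurpose};

	// Handle events surfaced via `process_pending_events`. Claim only while the
	// current height is strictly below `claim_deadline`; a `None` deadline means
	// no expiry-based restriction applies.
	let event_handler = |event: Event| match event {
		Event::PaymentClaimable { purpose, claim_deadline, .. } => {
			// Extract the preimage, if we have one, from the payment purpose.
			let preimage = match purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, .. } => payment_preimage,
				PaymentPurpose::SpontaneousPayment(preimage) => Some(preimage),
			};
			let deadline_ok = claim_deadline.map_or(true, |d| best_block_height < d);
			if let (Some(preimage), true) = (preimage, deadline_ok) {
				channel_manager.claim_funds(preimage);
			}
			// Even when claim_funds is called in time, wait for Event::PaymentClaimed
			// before treating the payment as successful.
		},
		_ => {},
	};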