From: Matt Corallo
Date: Sun, 15 Sep 2024 23:50:31 +0000 (+0000)
Subject: Pass info about claimed payments, incl HTLCs to `ChannelMonitor`s
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=b8661ef6cf7f13c9f904fbd25c59fb59415f0d73;p=rust-lightning

Pass info about claimed payments, incl HTLCs to `ChannelMonitor`s

When we claim an MPP payment and then crash before persisting all the
relevant `ChannelMonitor`s, we rely on the payment data being available
in the `ChannelManager` on restart to re-claim any parts that haven't
yet been claimed. This is fine as long as the `ChannelManager` was
persisted before the `PaymentClaimable` event was processed, which is
generally the case in our `lightning-background-processor`, but may not
be the case elsewhere, and can be violated by a somewhat rare race.

In order to fix this, we need to track where all the MPP parts of a
payment are in the `ChannelMonitor`, allowing us to re-claim any missing
pieces without reference to any `ChannelManager` data.

Further, in order to properly generate a `PaymentClaimed` event against
the restarted claim, we have to store various payment metadata with the
HTLC list as well.

Here we take the first step, building a list of MPP parts and metadata
in `ChannelManager` and passing it through to `ChannelMonitor` in the
`ChannelMonitorUpdate`s.
---
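To make the replay idea concrete: once each MPP part's location is recorded alongside the
preimage, startup can determine which parts still need a preimage-bearing update without
consulting any `ChannelManager` state. A minimal, self-contained sketch of that check, using toy
types (`MppPart`, `ClaimDetails`, and `parts_missing_preimage` are illustrative stand-ins, not
LDK's API):

    #[derive(Clone, Debug, PartialEq)]
    struct MppPart { channel_id: u64, htlc_id: u64 }

    struct ClaimDetails { mpp_parts: Vec<MppPart> }

    /// Given the parts recorded with the preimage and the parts whose monitors
    /// were persisted before a crash, compute what still needs re-claiming.
    fn parts_missing_preimage(claim: &ClaimDetails, persisted: &[MppPart]) -> Vec<MppPart> {
        claim.mpp_parts.iter().filter(|p| !persisted.contains(*p)).cloned().collect()
    }

    fn main() {
        let claim = ClaimDetails { mpp_parts: vec![
            MppPart { channel_id: 1, htlc_id: 0 },
            MppPart { channel_id: 2, htlc_id: 7 },
        ] };
        // Suppose we crashed after persisting only channel 1's monitor update:
        let persisted = [MppPart { channel_id: 1, htlc_id: 0 }];
        assert_eq!(parts_missing_preimage(&claim, &persisted),
            vec![MppPart { channel_id: 2, htlc_id: 7 }]);
    }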
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index d29c806ee..428221d8d 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -38,7 +38,7 @@ use crate::types::payment::{PaymentHash, PaymentPreimage};
 use crate::ln::msgs::DecodeError;
 use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
 use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
-use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId, PaymentClaimDetails};
 use crate::chain;
 use crate::chain::{BestBlock, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
@@ -546,6 +546,9 @@ pub(crate) enum ChannelMonitorUpdateStep {
 	},
 	PaymentPreimage {
 		payment_preimage: PaymentPreimage,
+		/// If this preimage was from an inbound payment claim, information about the claim should
+		/// be included here to enable claim replay on startup.
+		payment_info: Option<PaymentClaimDetails>,
 	},
 	CommitmentSecret {
 		idx: u64,
@@ -594,6 +597,7 @@ impl_writeable_tlv_based_enum_upgradable!(ChannelMonitorUpdateStep,
 	},
 	(2, PaymentPreimage) => {
 		(0, payment_preimage, required),
+		(1, payment_info, option),
 	},
 	(3, CommitmentSecret) => {
 		(0, idx, required),
@@ -1502,8 +1506,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 	{
 		let mut inner = self.inner.lock().unwrap();
 		let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash));
+		// Note that we don't pass any MPP claim parts here. This is generally not okay but in this
+		// case is acceptable as we only call this method from `ChannelManager` deserialization in
+		// cases where we are replaying a claim started on a previous version of LDK.
 		inner.provide_payment_preimage(
-			payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
+			payment_hash, payment_preimage, &None, broadcaster, fee_estimator, &logger)
 	}

 	/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
@@ -2930,12 +2937,14 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
 	/// commitment_tx_infos which contain the payment hash have been revoked.
 	fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
-		&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
+		&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage,
+		payment_info: &Option<PaymentClaimDetails>, broadcaster: &B,
 		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
 	where B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
+		// TODO: Store payment_info (but do not override any existing values)
 		self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());

 		let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| {
@@ -3139,9 +3148,9 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
 				self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger)
 			},
-			ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
+			ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, payment_info } => {
 				log_trace!(logger, "Updating ChannelMonitor with payment preimage");
-				self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
+				self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator, logger)
 			},
 			ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
 				log_trace!(logger, "Updating ChannelMonitor with commitment secret");
@@ -5097,8 +5106,12 @@ mod tests {
 		assert_eq!(replay_update.updates.len(), 1);
 		if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { ..
 		} = replay_update.updates[0] { } else { panic!(); }
-		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 });
-		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 });
+		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+			payment_preimage: payment_preimage_1, payment_info: None,
+		});
+		replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+			payment_preimage: payment_preimage_2, payment_info: None,
+		});

 		let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
 		assert!(
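The new `payment_info` field is serialized with the odd TLV type 1, which is what lets updates
written by this version still be read by older code: under the TLV convention that
`impl_writeable_tlv_based_enum_upgradable!` follows, unknown even types are fatal on read while
unknown odd types may be skipped. A toy sketch of that convention (the `write_tlv` and
`read_known_even_only` helpers below are invented for illustration and are not LDK's real
TLV-stream code):

    fn write_tlv(out: &mut Vec<u8>, typ: u8, value: &[u8]) {
        out.push(typ);
        out.push(value.len() as u8);
        out.extend_from_slice(value);
    }

    /// An "old" reader: fails on unknown *even* types, skips unknown odd ones.
    fn read_known_even_only(mut buf: &[u8], known: &[u8]) -> Option<()> {
        while let [typ, len, rest @ ..] = buf {
            let (_value, tail) = rest.split_at(*len as usize);
            if !known.contains(typ) && typ % 2 == 0 { return None; }
            buf = tail;
        }
        Some(())
    }

    fn main() {
        let mut update = Vec::new();
        write_tlv(&mut update, 0, &[0x11; 32]);     // payment_preimage, required (even)
        write_tlv(&mut update, 1, b"payment_info"); // new optional field (odd)
        // A reader that only knows type 0 still parses the whole update.
        assert!(read_known_even_only(&update, &[0]).is_some());
    }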
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 42458e476..42e4b2f3e 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -32,7 +32,7 @@ use crate::ln::msgs;
 use crate::ln::msgs::{ClosingSigned, ClosingSignedFeeRange, DecodeError};
 use crate::ln::script::{self, ShutdownScript};
 use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
-use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentClaimDetails, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use crate::ln::chan_utils::{
 	CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight,
 	htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction,
@@ -4039,14 +4039,17 @@ impl<SP: Deref> Channel<SP> where
 		// (see equivalent if condition there).
 		assert!(!self.context.channel_state.can_generate_new_commitment());
 		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
-		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger);
 		self.context.latest_monitor_update_id = mon_update_id;
 		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
 			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
 		}
 	}

-	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+	fn get_update_fulfill_htlc<L: Deref>(
+		&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage,
+		payment_info: Option<PaymentClaimDetails>, logger: &L,
+	) -> UpdateFulfillFetch where L::Target: Logger {
 		// Either ChannelReady got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted in an
 		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -4104,6 +4107,7 @@ impl<SP: Deref> Channel<SP> where
 				counterparty_node_id: Some(self.context.counterparty_node_id),
 				updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 					payment_preimage: payment_preimage_arg.clone(),
+					payment_info,
 				}],
 				channel_id: Some(self.context.channel_id()),
 			};
@@ -4171,9 +4175,12 @@ impl<SP: Deref> Channel<SP> where
 		}
 	}

-	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(
+		&mut self, htlc_id: u64, payment_preimage: PaymentPreimage,
+		payment_info: Option<PaymentClaimDetails>, logger: &L,
+	) -> UpdateFulfillCommitFetch where L::Target: Logger {
 		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
-		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) {
 			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
 				// Even if we aren't supposed to let new monitor updates with commitment state
 				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
@@ -4934,9 +4941,14 @@ impl<SP: Deref> Channel<SP> where
 						// not fail - any in between attempts to claim the HTLC will have resulted
 						// in it hitting the holding cell again and we cannot change the state of a
 						// holding cell HTLC from fulfill to anything else.
+						//
+						// Note that we should have already provided a preimage-containing
+						// `ChannelMonitorUpdate` to the user, making this one redundant; however,
+						// there's no harm in including the extra `ChannelMonitorUpdateStep` here.
+						// We do not bother to track and include `payment_info` here, though.
 						let mut additional_monitor_update =
 							if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
-								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
+								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger)
 							{ monitor_update } else { unreachable!() };
 						update_fulfill_count += 1;
 						monitor_update.updates.append(&mut additional_monitor_update.updates);
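Both the `// TODO: Store payment_info (but do not override any existing values)` in
`provide_payment_preimage` above and the holding-cell replay passing `None` point at
first-write-wins semantics: a redundant later preimage update must not clobber the claim details
recorded by the original one. A minimal sketch of that semantic using the standard `HashMap`
entry API (`store_claim_info` and the `String` payload are hypothetical, for illustration only):

    use std::collections::HashMap;

    type PaymentHash = [u8; 32];

    fn store_claim_info(
        store: &mut HashMap<PaymentHash, String>, hash: PaymentHash, info: Option<String>,
    ) {
        if let Some(info) = info {
            // Insert only if absent; never override previously recorded details.
            store.entry(hash).or_insert(info);
        }
    }

    fn main() {
        let mut store = HashMap::new();
        let hash = [0x42; 32];
        store_claim_info(&mut store, hash, Some("MPP parts + claim metadata".to_owned()));
        store_claim_info(&mut store, hash, None); // redundant holding-cell replay
        assert_eq!(store[&hash], "MPP parts + claim metadata");
    }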
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c1750c5af..699430749 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -801,6 +801,7 @@ pub(super) enum RAACommitmentOrder {
 }

 /// Information about a payment which is currently being claimed.
+#[derive(Clone, Debug, PartialEq, Eq)]
 struct ClaimingPayment {
 	amount_msat: u64,
 	payment_purpose: events::PaymentPurpose,
@@ -1072,12 +1073,36 @@ struct MPPClaimHTLCSource {
 	htlc_id: u64,
 }

+impl_writeable_tlv_based!(MPPClaimHTLCSource, {
+	(0, counterparty_node_id, required),
+	(2, funding_txo, required),
+	(4, channel_id, required),
+	(6, htlc_id, required),
+});
+
 #[derive(Debug)]
 pub(crate) struct PendingMPPClaim {
 	channels_without_preimage: Vec<MPPClaimHTLCSource>,
 	channels_with_preimage: Vec<MPPClaimHTLCSource>,
 }

+#[derive(Clone, Debug, PartialEq, Eq)]
+/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
+/// [`ChannelMonitor`] so that we can replay the claim without any information from the
+/// [`ChannelManager`] at all. This struct stores that information, with enough detail to replay
+/// claims against all MPP parts as well as to generate an [`Event::PaymentClaimed`].
+pub(crate) struct PaymentClaimDetails {
+	mpp_parts: Vec<MPPClaimHTLCSource>,
+	/// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
+	/// [`Event::PaymentClaimed`].
+	claiming_payment: ClaimingPayment,
+}
+
+impl_writeable_tlv_based!(PaymentClaimDetails, {
+	(0, mpp_parts, required_vec),
+	(2, claiming_payment, required),
+});
+
 #[derive(Clone)]
 pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
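`PendingMPPClaimPointer` wraps the pending claim in `Arc<Mutex<..>>` so that the completion
handler for each HTLC part can update one shared record, and the `PaymentClaimed` event fires
exactly once, when the last part completes. A reduced sketch of that pattern (toy `Part`,
`PendingClaim`, and `on_part_claimed` names, not LDK's):

    use std::sync::{Arc, Mutex};

    #[derive(Clone, PartialEq)]
    struct Part { channel_id: u64, htlc_id: u64 }

    struct PendingClaim {
        channels_without_preimage: Vec<Part>,
        channels_with_preimage: Vec<Part>,
    }

    /// Move one part from "without preimage" to "with preimage"; report whether
    /// every part has now been claimed (i.e. the event should be generated).
    fn on_part_claimed(claim: &Arc<Mutex<PendingClaim>>, part: &Part) -> bool {
        let mut claim = claim.lock().unwrap();
        if let Some(idx) = claim.channels_without_preimage.iter().position(|p| p == part) {
            let p = claim.channels_without_preimage.remove(idx);
            claim.channels_with_preimage.push(p);
        }
        claim.channels_without_preimage.is_empty()
    }

    fn main() {
        let parts = vec![Part { channel_id: 1, htlc_id: 0 }, Part { channel_id: 2, htlc_id: 3 }];
        let claim = Arc::new(Mutex::new(PendingClaim {
            channels_without_preimage: parts.clone(),
            channels_with_preimage: Vec::new(),
        }));
        assert!(!on_part_claimed(&claim, &parts[0])); // one part still pending
        assert!(on_part_claimed(&claim, &parts[1]));  // last part: fire the event
    }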
@@ -6675,6 +6700,7 @@ where

 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

+		let claiming_payment;
 		let sources = {
 			let mut claimable_payments = self.claimable_payments.lock().unwrap();
 			if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) {
@@ -6689,7 +6715,7 @@ where
 				}

 				let payment_id = payment.inbound_payment_id(&self.inbound_payment_id_secret);
-				let claiming_payment = claimable_payments.pending_claiming_payments
+				claiming_payment = claimable_payments.pending_claiming_payments
 					.entry(payment_hash)
 					.and_modify(|_| {
 						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
@@ -6708,7 +6734,7 @@ where
 						onion_fields: payment.onion_fields,
 						payment_id: Some(payment_id),
 					}
-				});
+				}).clone();

 				if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields {
 					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
@@ -6772,26 +6798,27 @@ where
 			return;
 		}
 		if valid_mpp {
+			let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
+				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
+					Some(MPPClaimHTLCSource {
+						counterparty_node_id: cp_id,
+						funding_txo: htlc.prev_hop.outpoint,
+						channel_id: htlc.prev_hop.channel_id,
+						htlc_id: htlc.prev_hop.htlc_id,
+					})
+				} else {
+					None
+				}
+			}).collect();
 			let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
-				let channels_without_preimage = sources.iter().filter_map(|htlc| {
-					if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
-						Some(MPPClaimHTLCSource {
-							counterparty_node_id: cp_id,
-							funding_txo: htlc.prev_hop.outpoint,
-							channel_id: htlc.prev_hop.channel_id,
-							htlc_id: htlc.prev_hop.htlc_id,
-						})
-					} else {
-						None
-					}
-				}).collect();
 				Some(Arc::new(Mutex::new(PendingMPPClaim {
-					channels_without_preimage,
+					channels_without_preimage: mpp_parts.clone(),
 					channels_with_preimage: Vec::new(),
 				})))
 			} else {
 				None
 			};
+			let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
 			for htlc in sources {
 				let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
 					if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
@@ -6807,7 +6834,7 @@ where
 					}
 				});
 				self.claim_funds_from_hop(
-					htlc.prev_hop, payment_preimage,
+					htlc.prev_hop, payment_preimage, payment_info.clone(),
 					|_, definitely_duplicate| {
 						debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
 						(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
@@ -6837,7 +6864,7 @@ where
 		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
 	>(
 		&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
-		completion_action: ComplFunc,
+		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
 	) {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
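The `completion_action: ComplFunc` parameter above keeps the claim mechanics decoupled from the
follow-up handling: the caller passes a `FnOnce` that maps the claim outcome onto an optional
completion action. A reduced sketch of that shape (toy `CompletionAction` and `claim_from_hop`
names; the real closure also returns an optional RAA-blocking action, per the signature above):

    #[derive(Debug, PartialEq)]
    enum CompletionAction { PaymentClaimed { amount_msat: u64 } }

    fn claim_from_hop<F>(htlc_value_msat: u64, completion_action: F) -> Option<CompletionAction>
    where F: FnOnce(Option<u64>, bool) -> Option<CompletionAction> {
        let definitely_duplicate = false; // toy: pretend the claim is fresh
        completion_action(Some(htlc_value_msat), definitely_duplicate)
    }

    fn main() {
        let action = claim_from_hop(10_000, |value, dup| {
            debug_assert!(!dup, "We shouldn't claim duplicatively from a payment");
            value.map(|amount_msat| CompletionAction::PaymentClaimed { amount_msat })
        });
        assert_eq!(action, Some(CompletionAction::PaymentClaimed { amount_msat: 10_000 }));
    }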
@@ -6871,7 +6898,8 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let counterparty_node_id = chan.context.get_counterparty_node_id();
 						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
-						let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
+						let fulfill_res =
+							chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);

 						match fulfill_res {
 							UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
@@ -6959,6 +6987,7 @@ where
 			counterparty_node_id: None,
 			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 				payment_preimage,
+				payment_info,
 			}],
 			channel_id: Some(prev_hop.channel_id),
 		};
@@ -7069,7 +7098,7 @@ where
 		let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 		#[cfg(debug_assertions)]
 		let claiming_chan_funding_outpoint = hop_data.outpoint;
-		self.claim_funds_from_hop(hop_data, payment_preimage,
+		self.claim_funds_from_hop(hop_data, payment_preimage, None,
 			|htlc_claim_value_msat, definitely_duplicate| {
 				let chan_to_release =
 					if let Some(node_id) = next_channel_counterparty_node_id {
@@ -7105,7 +7134,7 @@ where
 						if *funding_txo == claiming_chan_funding_outpoint {
 							assert!(update.updates.iter().any(|upd|
 								if let ChannelMonitorUpdateStep::PaymentPreimage {
-									payment_preimage: update_preimage
+									payment_preimage: update_preimage, ..
 								} = upd {
 									payment_preimage == *update_preimage
 								} else { false }
diff --git a/pending_changelog/3322-a.txt b/pending_changelog/3322-a.txt
new file mode 100644
index 000000000..83849926a
--- /dev/null
+++ b/pending_changelog/3322-a.txt
@@ -0,0 +1,6 @@
+API Changes
+===========
+
+Additional information is now stored in `ChannelMonitorUpdate`s, which may substantially increase
+the size of `ChannelMonitorUpdate`s claiming inbound payments. The expected maximum size of a
+`ChannelMonitorUpdate` shouldn't change materially.