use crate::ln::msgs::DecodeError;
use crate::ln::channel_keys::{DelayedPaymentKey, DelayedPaymentBasepoint, HtlcBasepoint, HtlcKey, RevocationKey, RevocationBasepoint};
use crate::ln::chan_utils::{self,CommitmentTransaction, CounterpartyCommitmentSecrets, HTLCOutputInCommitment, HTLCClaim, ChannelTransactionParameters, HolderCommitmentTransaction, TxCreationKeys};
-use crate::ln::channelmanager::{HTLCSource, SentHTLCId};
+use crate::ln::channelmanager::{HTLCSource, SentHTLCId, PaymentClaimDetails};
use crate::chain;
use crate::chain::{BestBlock, WatchedOutput};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
},
PaymentPreimage {
payment_preimage: PaymentPreimage,
+ /// If this preimage was from an inbound payment claim, information about the claim should
+ /// be included here to enable claim replay on startup.
+ payment_info: Option<PaymentClaimDetails>,
},
CommitmentSecret {
idx: u64,
},
(2, PaymentPreimage) => {
(0, payment_preimage, required),
+ (1, payment_info, option),
},
(3, CommitmentSecret) => {
(0, idx, required),
{
let mut inner = self.inner.lock().unwrap();
let logger = WithChannelMonitor::from_impl(logger, &*inner, Some(*payment_hash));
+ // Note that we don't pass any MPP claim parts here. This is generally not okay but in this
+ // case is acceptable as we only call this method from `ChannelManager` deserialization in
+ // cases where we are replaying a claim started on a previous version of LDK.
inner.provide_payment_preimage(
- payment_hash, payment_preimage, broadcaster, fee_estimator, &logger)
+ payment_hash, payment_preimage, &None, broadcaster, fee_estimator, &logger)
}
/// Updates a ChannelMonitor on the basis of some new information provided by the Channel
/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
/// commitment_tx_infos which contain the payment hash have been revoked.
fn provide_payment_preimage<B: Deref, F: Deref, L: Deref>(
- &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage, broadcaster: &B,
+ &mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage,
+ payment_info: &Option<PaymentClaimDetails>, broadcaster: &B,
fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &WithChannelMonitor<L>)
where B::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
+ // TODO: Store payment_info (but do not override any existing values)
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| {
log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info");
self.provide_latest_counterparty_commitment_tx(*commitment_txid, htlc_outputs.clone(), *commitment_number, *their_per_commitment_point, logger)
},
- ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } => {
+ ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage, payment_info } => {
log_trace!(logger, "Updating ChannelMonitor with payment preimage");
- self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, broadcaster, &bounded_fee_estimator, logger)
+ self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()), &payment_preimage, payment_info, broadcaster, &bounded_fee_estimator, logger)
},
ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } => {
log_trace!(logger, "Updating ChannelMonitor with commitment secret");
assert_eq!(replay_update.updates.len(), 1);
if let ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { .. } = replay_update.updates[0] {
} else { panic!(); }
- replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_1 });
- replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage: payment_preimage_2 });
+ replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_1, payment_info: None,
+ });
+ replay_update.updates.push(ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_2, payment_info: None,
+ });
let broadcaster = TestBroadcaster::with_blocks(Arc::clone(&nodes[1].blocks));
assert!(
use crate::ln::msgs::{ClosingSigned, ClosingSignedFeeRange, DecodeError};
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channel_state::{ChannelShutdownState, CounterpartyForwardingInfo, InboundHTLCDetails, InboundHTLCStateDetails, OutboundHTLCDetails, OutboundHTLCStateDetails};
-use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentClaimDetails, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use crate::ln::chan_utils::{
CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight,
htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction,
// (see equivalent if condition there).
assert!(!self.context.channel_state.can_generate_new_commitment());
let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
- let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger);
self.context.latest_monitor_update_id = mon_update_id;
if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
}
}
- fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ fn get_update_fulfill_htlc<L: Deref>(
+ &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage,
+ payment_info: Option<PaymentClaimDetails>, logger: &L,
+ ) -> UpdateFulfillFetch where L::Target: Logger {
// Either ChannelReady got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
counterparty_node_id: Some(self.context.counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage: payment_preimage_arg.clone(),
+ payment_info,
}],
channel_id: Some(self.context.channel_id()),
};
}
}
- pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(
+ &mut self, htlc_id: u64, payment_preimage: PaymentPreimage,
+ payment_info: Option<PaymentClaimDetails>, logger: &L,
+ ) -> UpdateFulfillCommitFetch where L::Target: Logger {
let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
- match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) {
UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
// Even if we aren't supposed to let new monitor updates with commitment state
// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
// not fail - any in between attempts to claim the HTLC will have resulted
// in it hitting the holding cell again and we cannot change the state of a
// holding cell HTLC from fulfill to anything else.
+ //
+ // Note that we should have already provided a preimage-containing
+ // `ChannelMonitorUpdate` to the user, making this one redundant, however
+ // there's no harm in including the extra `ChannelMonitorUpdateStep` here.
+ // We do not bother to track and include `payment_info` here, however.
let mut additional_monitor_update =
if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
- self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
+ self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger)
{ monitor_update } else { unreachable!() };
update_fulfill_count += 1;
monitor_update.updates.append(&mut additional_monitor_update.updates);
}
/// Information about a payment which is currently being claimed.
+#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
amount_msat: u64,
payment_purpose: events::PaymentPurpose,
htlc_id: u64,
}
+impl_writeable_tlv_based!(MPPClaimHTLCSource, {
+ (0, counterparty_node_id, required),
+ (2, funding_txo, required),
+ (4, channel_id, required),
+ (6, htlc_id, required),
+});
+
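For reference, the `MPPClaimHTLCSource` struct serialized by the macro above is only partially visible in this excerpt (the trailing `htlc_id: u64` field and closing brace appear in the context lines). A minimal sketch of its assumed shape follows, with field types inferred from how the fields are populated from `htlc.prev_hop` later in this diff; the exact types and derives are assumptions, not confirmed by the excerpt:

    // Sketch only: shape inferred from the TLV mapping above and from the
    // `MPPClaimHTLCSource { counterparty_node_id: cp_id, funding_txo: .., .. }`
    // construction later in this diff. Types/derives are assumptions.
    #[derive(Clone, Debug, PartialEq, Eq)]
    pub(crate) struct MPPClaimHTLCSource {
        counterparty_node_id: PublicKey, // assumed bitcoin::secp256k1::PublicKey
        funding_txo: OutPoint,           // assumed chain::transaction::OutPoint
        channel_id: ChannelId,
        htlc_id: u64,
    }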
#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
channels_without_preimage: Vec<MPPClaimHTLCSource>,
channels_with_preimage: Vec<MPPClaimHTLCSource>,
}
+/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
+/// [`ChannelMonitor`] so that we can replay the claim without any information from the
+/// [`ChannelManager`] at all. This struct stores that information, with enough detail to replay
+/// claims against all MPP parts as well as to generate an [`Event::PaymentClaimed`].
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct PaymentClaimDetails {
+ mpp_parts: Vec<MPPClaimHTLCSource>,
+ /// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
+ /// [`Event::PaymentClaimed`].
+ claiming_payment: ClaimingPayment,
+}
+
+impl_writeable_tlv_based!(PaymentClaimDetails, {
+ (0, mpp_parts, required_vec),
+ (2, claiming_payment, required),
+});
+
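Because `impl_writeable_tlv_based!` generates `Writeable` and `Readable` implementations, the claim details can survive a serialization round trip through persisted monitor state. A minimal crate-internal sketch, assuming LDK's `crate::util::ser::{Readable, Writeable}` traits and the macro-generated impls (neither is part of this diff):

    // Sketch only: exercises the Writeable/Readable impls generated by the macro
    // above. The PartialEq derive added in this change enables the final assertion.
    fn payment_claim_details_round_trip(details: &PaymentClaimDetails) {
        use crate::util::ser::{Readable, Writeable};
        let encoded = details.encode();
        let decoded: PaymentClaimDetails =
            Readable::read(&mut &encoded[..]).expect("we just serialized this value");
        assert_eq!(*details, decoded);
    }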
#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let claiming_payment;
let sources = {
let mut claimable_payments = self.claimable_payments.lock().unwrap();
if let Some(payment) = claimable_payments.claimable_payments.remove(&payment_hash) {
}
let payment_id = payment.inbound_payment_id(&self.inbound_payment_id_secret);
- let claiming_payment = claimable_payments.pending_claiming_payments
+ claiming_payment = claimable_payments.pending_claiming_payments
.entry(payment_hash)
.and_modify(|_| {
debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
onion_fields: payment.onion_fields,
payment_id: Some(payment_id),
}
- });
+ }).clone();
if let Some(RecipientOnionFields { ref custom_tlvs, .. }) = claiming_payment.onion_fields {
if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
return;
}
if valid_mpp {
+ let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
+ if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
+ Some(MPPClaimHTLCSource {
+ counterparty_node_id: cp_id,
+ funding_txo: htlc.prev_hop.outpoint,
+ channel_id: htlc.prev_hop.channel_id,
+ htlc_id: htlc.prev_hop.htlc_id,
+ })
+ } else {
+ None
+ }
+ }).collect();
let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
- let channels_without_preimage = sources.iter().filter_map(|htlc| {
- if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
- Some(MPPClaimHTLCSource {
- counterparty_node_id: cp_id,
- funding_txo: htlc.prev_hop.outpoint,
- channel_id: htlc.prev_hop.channel_id,
- htlc_id: htlc.prev_hop.htlc_id,
- })
- } else {
- None
- }
- }).collect();
Some(Arc::new(Mutex::new(PendingMPPClaim {
- channels_without_preimage,
+ channels_without_preimage: mpp_parts.clone(),
channels_with_preimage: Vec::new(),
})))
} else {
None
};
+ let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
for htlc in sources {
let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
}
});
self.claim_funds_from_hop(
- htlc.prev_hop, payment_preimage,
+ htlc.prev_hop, payment_preimage, payment_info.clone(),
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
>(
&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
- completion_action: ComplFunc,
+ payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let counterparty_node_id = chan.context.get_counterparty_node_id();
let logger = WithChannelContext::from(&self.logger, &chan.context, None);
- let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &&logger);
+ let fulfill_res =
+ chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
match fulfill_res {
UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
counterparty_node_id: None,
updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
payment_preimage,
+ payment_info,
}],
channel_id: Some(prev_hop.channel_id),
};
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage, None,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
if *funding_txo == claiming_chan_funding_outpoint {
assert!(update.updates.iter().any(|upd|
if let ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: update_preimage
+ payment_preimage: update_preimage, ..
} = upd {
payment_preimage == *update_preimage
} else { false }
--- /dev/null
+API Changes
+===========
+
+Additional information is now stored in `ChannelMonitorUpdate`s, which may substantially increase
+the size of `ChannelMonitorUpdate`s that claim inbound payments. The expected maximum size of
+`ChannelMonitorUpdate`s should not change materially.