}
}
}
-#[cfg(not(feature = "grind_signatures"))]
-#[cfg(test)]
impl HTLCSource {
+ #[cfg(not(feature = "grind_signatures"))]
+ #[cfg(test)]
pub fn dummy() -> Self {
HTLCSource::OutboundRoute {
	path: Vec::new(),
	session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
	first_hop_htlc_msat: 0,
	payment_id: PaymentId([2; 32]),
}
}
+
+ #[cfg(debug_assertions)]
+ /// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
+ /// transaction. Useful to ensure different data structures match up.
+ pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
+ if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
+ *first_hop_htlc_msat == htlc.amount_msat
+ } else {
+ // There's nothing we can check for forwarded HTLCs
+ true
+ }
+ }
}
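
// Illustration only (not part of the diff): a self-contained sketch of the matching
// rule in `possibly_matches_output` above, using simplified stand-in types in place
// of the real `HTLCSource` and `HTLCOutputInCommitment`.
enum SourceSketch {
	// For payments we originated we know the exact first-hop HTLC amount.
	OutboundRoute { first_hop_htlc_msat: u64 },
	// Forwarded HTLCs carry no amount we can cross-check here.
	PreviousHop,
}

struct HtlcOutputSketch {
	amount_msat: u64,
}

fn possibly_matches(source: &SourceSketch, htlc: &HtlcOutputSketch) -> bool {
	match source {
		SourceSketch::OutboundRoute { first_hop_htlc_msat } =>
			*first_hop_htlc_msat == htlc.amount_msat,
		SourceSketch::PreviousHop => true,
	}
}

fn main() {
	let source = SourceSketch::OutboundRoute { first_hop_htlc_msat: 5_000 };
	assert!(possibly_matches(&source, &HtlcOutputSketch { amount_msat: 5_000 }));
	assert!(!possibly_matches(&source, &HtlcOutputSketch { amount_msat: 4_000 }));
	// A forwarded HTLC always "possibly matches".
	assert!(possibly_matches(&SourceSketch::PreviousHop, &HtlcOutputSketch { amount_msat: 1 }));
}
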
struct ReceiveError {
}
}
let mut total_value = claimable_htlc.sender_intended_value;
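+ // Track the earliest CLTV expiry across all parts of this payment; the claim
+ // deadline we hand the user must reflect the part that times out first.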
+ let mut earliest_expiry = claimable_htlc.cltv_expiry;
for htlc in htlcs.iter() {
total_value += htlc.sender_intended_value;
+ earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
match &htlc.onion_payload {
OnionPayload::Invoice { .. } => {
if htlc.total_msat != $payment_data.total_msat {
amount_msat,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
+ claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
});
payment_claimable_generated = true;
} else {
hash_map::Entry::Vacant(e) => {
let amount_msat = claimable_htlc.value;
claimable_htlc.total_value_received = Some(amount_msat);
+ let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
e.insert((purpose.clone(), vec![claimable_htlc]));
let prev_channel_id = prev_funding_outpoint.to_channel_id();
purpose,
via_channel_id: Some(prev_channel_id),
via_user_channel_id: Some(prev_user_channel_id),
+ claim_deadline,
});
},
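
// Sketch only (not part of the diff): how the `claim_deadline` values above are
// derived. The buffer constant mirrors `HTLC_FAIL_BACK_BUFFER` from the code above,
// but the value 72 here is an illustrative assumption, not necessarily LDK's.
const HTLC_FAIL_BACK_BUFFER: u32 = 72;

// For a multi-part payment the deadline is driven by the part whose HTLC expires
// first, minus the buffer, leaving time to fail back if the user never claims.
fn claim_deadline(cltv_expiries: &[u32]) -> Option<u32> {
	cltv_expiries.iter().copied().min().map(|e| e - HTLC_FAIL_BACK_BUFFER)
}

fn main() {
	// Three MPP parts: the deadline follows the earliest expiry, 800_100.
	assert_eq!(claim_deadline(&[800_200, 800_100, 800_500]), Some(800_100 - HTLC_FAIL_BACK_BUFFER));
	// A single spontaneous-payment HTLC works the same way.
	assert_eq!(claim_deadline(&[800_050]), Some(800_050 - HTLC_FAIL_BACK_BUFFER));
}
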
hash_map::Entry::Occupied(_) => {
/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
/// [`MessageSendEvent`]s needed to claim the payment.
///
- /// Note that calling this method does *not* guarantee that the payment has been claimed. You
- /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
- /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+ /// This method is guaranteed to claim the payment, but only if the current height is
+ /// strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race conditions, you
+ /// should wait for an [`Event::PaymentClaimed`] event before considering the payment
+ /// successful; it will generally be available in the next [`process_pending_events`] call.
///
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
+ /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
/// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
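
// Hedged sketch (not LDK's API) of the calling pattern the documentation above
// describes: claim only while strictly below `claim_deadline`, and treat the payment
// as settled only once `PaymentClaimed` is seen. The event types are simplified
// stand-ins for illustration.
enum EventSketch {
	PaymentClaimable { claim_deadline: Option<u32>, amount_msat: u64 },
	PaymentClaimed { amount_msat: u64 },
}

fn handle_event(ev: EventSketch, current_height: u32) {
	match ev {
		EventSketch::PaymentClaimable { claim_deadline, amount_msat } => {
			// Past the deadline the claim may race with the HTLC being failed
			// back, so a conservative handler skips it.
			if claim_deadline.map_or(true, |deadline| current_height < deadline) {
				println!("would call claim_funds for {} msat", amount_msat);
			}
		},
		EventSketch::PaymentClaimed { amount_msat } => {
			// Only now is the payment actually settled.
			println!("payment of {} msat settled", amount_msat);
		},
	}
}

fn main() {
	handle_event(EventSketch::PaymentClaimable { claim_deadline: Some(800_100), amount_msat: 10_000 }, 800_000);
	handle_event(EventSketch::PaymentClaimed { amount_msat: 10_000 }, 800_001);
}
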
};
debug_assert!(!sources.is_empty());
- // If we are claiming an MPP payment, we check that all channels which contain a claimable
- // HTLC still exist. While this isn't guaranteed to remain true if a channel closes while
- // we're claiming (or even after we claim, before the commitment update dance completes),
- // it should be a relatively rare race, and we'd rather not claim HTLCs that require us to
- // go on-chain (and lose the on-chain fee to do so) than just reject the payment.
- //
- // Note that we'll still always get our funds - as long as the generated
- // `ChannelMonitorUpdate` makes it out to the relevant monitor we can claim on-chain.
- //
- // If we find an HTLC which we would need to claim but for which we do not have a
- // channel, we will fail all parts of the MPP payment. While we could wait and see if
- // the sender retries the already-failed path(s), it should be a pretty rare case where
- // we got all the HTLCs and then a channel closed while we were waiting for the user to
- // provide the preimage, so worrying too much about the optimal handling isn't worth
- // it.
+ // Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
+ // and when we got here, we need to check that the amount we're about to claim matches the
+ // amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
+ // the MPP parts all have the same `total_msat`.
let mut claimable_amt_msat = 0;
let mut prev_total_msat = None;
let mut expected_amt_msat = None;
let mut errs = Vec::new();
let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
- let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
- Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
- None => {
- valid_mpp = false;
- break;
- }
- };
-
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- valid_mpp = false;
- break;
- }
-
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
-
- if peer_state.channel_by_id.get(&chan_id).is_none() {
- valid_mpp = false;
- break;
- }
-
if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
debug_assert!(false);
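
// Sketch (not part of the diff) of the consistency check described in the comment
// above, under simplified assumptions: all parts of one MPP payment must agree on
// `total_msat`, and the sum of the parts is the amount we can actually claim.
struct PartSketch {
	value_msat: u64,
	total_msat: u64,
}

// Returns the claimable sum, or `None` if the parts disagree on the expected total
// (which should be unreachable and would fail the claim).
fn validate_mpp(parts: &[PartSketch]) -> Option<u64> {
	let expected_total = parts.first()?.total_msat;
	if parts.iter().any(|p| p.total_msat != expected_total) {
		return None;
	}
	Some(parts.iter().map(|p| p.value_msat).sum())
}

fn main() {
	let parts = [
		PartSketch { value_msat: 4_000, total_msat: 10_000 },
		PartSketch { value_msat: 6_000, total_msat: 10_000 },
	];
	assert_eq!(validate_mpp(&parts), Some(10_000));
}
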
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let per_peer_state = self.per_peer_state.read().unwrap();
- let chan_id = prev_hop.outpoint.to_channel_id();
- let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
- Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
- None => None
- };
+ {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let chan_id = prev_hop.outpoint.to_channel_id();
+ let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+ Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
+ None => None
+ };
- let peer_state_opt = counterparty_node_id_opt.as_ref().map(
- |counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
- |peer_mutex| peer_mutex.lock().unwrap()
- )
- ).unwrap_or(None);
+ let peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ |counterparty_node_id| per_peer_state.get(counterparty_node_id)
+ .map(|peer_mutex| peer_mutex.lock().unwrap())
+ ).unwrap_or(None);
- if peer_state_opt.is_some() {
- let mut peer_state_lock = peer_state_opt.unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- log_bytes!(chan_id), action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
- }
- let update_id = monitor_update.update_id;
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
- let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
- peer_state, per_peer_state, chan);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
+ if peer_state_opt.is_some() {
+ let mut peer_state_lock = peer_state_opt.unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ log_bytes!(chan_id), action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ let update_id = monitor_update.update_id;
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
+ let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
+ peer_state, per_peer_state, chan);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
}
+ return Ok(());
}
- return Ok(());
}
}
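
// Hedged sketch (not part of the diff) of the retry the TODO above alludes to: once
// the preimage is in the monitor update, persistence must eventually succeed, so keep
// retrying. `persist_update` is a hypothetical stand-in for the real monitor-update
// call, and the failure pattern below is fabricated for illustration.
use std::{thread, time::Duration};

fn persist_update(attempt: u32) -> Result<(), &'static str> {
	// Stand-in: pretend the store is unavailable for the first two attempts.
	if attempt < 2 { Err("store unavailable") } else { Ok(()) }
}

fn persist_until_success() {
	let mut attempt = 0;
	loop {
		match persist_update(attempt) {
			Ok(()) => return,
			Err(e) => {
				// A real implementation would log and back off more carefully.
				eprintln!("monitor update failed ({}), retrying", e);
				attempt += 1;
				thread::sleep(Duration::from_millis(10));
			}
		}
	}
}

fn main() {
	persist_until_success();
}
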
let preimage_update = ChannelMonitorUpdate {