cltv_expiry: u32,
value: u64,
onion_payload: OnionPayload,
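+ // Number of timer ticks this HTLC has spent waiting for the rest of its MPP parts.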
+ timer_ticks: u8,
}
/// pending HTLCs in flight.
pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
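+/// (i.e. roughly three minutes, assuming the recommended once-per-minute tick interval).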
+pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+
/// Information needed for constructing an invoice route hint for this channel.
#[derive(Clone, Debug, PartialEq)]
pub struct CounterpartyForwardingInfo {
phantom_shared_secret,
},
value: amt_to_forward,
+ timer_ticks: 0,
cltv_expiry,
onion_payload,
};
let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut handle_errors = Vec::new();
+ let mut timed_out_mpp_htlcs = Vec::new();
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
true
});
+
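+ // Bump the timer on the parts of any MPP that is still missing pieces; once a part
+ // has waited MPP_TIMEOUT_TICKS ticks, fail all received parts of that payment back.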
+ channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+ if htlcs.is_empty() {
+ // This should be unreachable
+ debug_assert!(false);
+ return false;
+ }
+ if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+ // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+ // In this case we're not going to handle any timeouts of the parts here.
+ if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ return true;
+ } else if htlcs.iter_mut().any(|htlc| {
+ htlc.timer_ticks += 1;
+ htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+ }) {
+ timed_out_mpp_htlcs.extend(htlcs.iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+ return false;
+ }
+ }
+ true
+ });
+ }
+
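+ // 23 is the BOLT 4 "mpp_timeout" failure code.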
+ for htlc_source in timed_out_mpp_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+ }
for (err, counterparty_node_id) in handle_errors.drain(..) {
};
Ok(Self {
prev_hop: prev_hop.0.unwrap(),
+ timer_ticks: 0,
value,
onion_payload,
cltv_expiry,
use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::OutPoint;
use chain::keysinterface::KeysInterface;
-use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, PaymentId, PaymentSendFailure};
+use ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, ChannelManagerReadArgs, MPP_TIMEOUT_TICKS, PaymentId, PaymentSendFailure};
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::msgs;
use ln::msgs::ChannelMessageHandler;
claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}
+fn do_mpp_receive_timeout(send_partial_mpp: bool) {
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+
+ let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
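+ // Duplicate the path and aim each copy at a different route so the payment is sent as two MPP parts.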
+ let path = route.paths[0].clone();
+ route.paths.push(path);
+ route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0][0].short_channel_id = chan_1_id;
+ route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1][0].short_channel_id = chan_2_id;
+ route.paths[1][1].short_channel_id = chan_4_id;
+
+ // Initiate the MPP payment.
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 2); // one monitor per path
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+
+ // Pass half of the payment along the first path.
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);
+
+ if send_partial_mpp {
+ // Time out the partial MPP
+ for _ in 0..MPP_TIMEOUT_TICKS {
+ nodes[3].node.timer_tick_occurred();
+ }
+
+ // Failed HTLC from node 3 -> 1
+ expect_pending_htlcs_forwardable!(nodes[3]);
+ let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
+ assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
+ nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
+ check_added_monitors!(nodes[3], 1);
+ commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
+
+ // Failed HTLC from node 1 -> 0
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
+ check_added_monitors!(nodes[1], 1);
+ commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
+
+ expect_payment_failed_conditions!(nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
+ } else {
+ // Pass half of the payment along the second path.
+ pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);
+
+ // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
+ for _ in 0..MPP_TIMEOUT_TICKS {
+ nodes[3].node.timer_tick_occurred();
+ }
+
+ claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+ }
+}
+
+#[test]
+fn mpp_receive_timeout() {
+ do_mpp_receive_timeout(true);
+ do_mpp_receive_timeout(false);
+}
+
#[test]
fn retry_expired_payment() {
let chanmon_cfgs = create_chanmon_cfgs(3);