+
+#[test]
+#[cfg(feature = "std")]
+fn test_threaded_payment_retries() {
+ // In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
+ // a single thread and would happily let multiple threads run retries at the same time. Because
+ // retries are done by first calculating the amount we need to retry, then dropping the
+ // relevant lock, then actually sending, we would happily let multiple threads retry the same
+ // amount at the same time, overpaying our original HTLC!
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ // There is one mitigating guardrail when retrying payments - we can never over-pay by more
+ // than 10% of the original value. Thus, we want all our retries to be below that. In order to
+ // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
+ // out over channels 3+4. This lets us ignore 99.9% of the payment value and deal only with
+ // the small HTLC over channel 1, which is the one that gets retried.
+ let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0);
+ let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id;
+ let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id;
+
+ let amt_msat = 100_000_000;
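+ // With amt_msat = 100_000_000, the small path below carries amt_msat / 1000 = 100_000 msat,
+ // while the 10% over-payment cap works out to 10_000_000 msat, so even a handful of duplicate
+ // retries of the small HTLC would stay well below the guardrail.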
+ let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
+ #[cfg(feature = "std")]
+ let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
+ #[cfg(not(feature = "std"))]
+ let payment_expiry_secs = 60 * 60;
+ let mut invoice_features = InvoiceFeatures::empty();
+ invoice_features.set_variable_length_onion_required();
+ invoice_features.set_payment_secret_required();
+ invoice_features.set_basic_mpp_optional();
+ let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_expiry_time(payment_expiry_secs as u64)
+ .with_features(invoice_features);
+ let mut route_params = RouteParameters {
+ payment_params,
+ final_value_msat: amt_msat,
+ };
+
+ let mut route = Route {
+ paths: vec![
+ Path { hops: vec![RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ node_features: nodes[1].node.node_features(),
+ short_channel_id: chan_1_scid,
+ channel_features: nodes[1].node.channel_features(),
+ fee_msat: 0,
+ cltv_expiry_delta: 100,
+ }, RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ node_features: nodes[2].node.node_features(),
+ short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown
+ channel_features: nodes[2].node.channel_features(),
+ fee_msat: amt_msat / 1000,
+ cltv_expiry_delta: 100,
+ }], blinded_tail: None },
+ Path { hops: vec![RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ node_features: nodes[2].node.node_features(),
+ short_channel_id: chan_3_scid,
+ channel_features: nodes[2].node.channel_features(),
+ fee_msat: 100_000,
+ cltv_expiry_delta: 100,
+ }, RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ node_features: nodes[3].node.node_features(),
+ short_channel_id: chan_4_scid,
+ channel_features: nodes[3].node.channel_features(),
+ fee_msat: amt_msat - amt_msat / 1000,
+ cltv_expiry_delta: 100,
+ }], blinded_tail: None }
+ ],
+ payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
+ };
+ nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+
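+ // Kick the payment off with an effectively unlimited retry count so the worker threads spawned
+ // below can keep retrying for the full duration of the test.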
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+ let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_msg_events.len(), 2);
+ send_msg_events.retain(|msg|
+ if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg {
+ // Drop the commitment update for nodes[2], we can just let that one sit pending
+ // forever.
+ *node_id == nodes[1].node.get_our_node_id()
+ } else { panic!(); }
+ );
+
+ // from here on out, the retry `RouteParameters` amount will be amt/1000
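+ // We also drop the second (large) path from the route we'll hand back for retries, so the
+ // mocked router only ever returns the small path that keeps failing.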
+ route_params.final_value_msat /= 1000;
+ route.paths.pop();
+
+ let end_time = Instant::now() + Duration::from_secs(1);
+ macro_rules! thread_body { () => { {
+ // We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
+ let node_ref = NodePtr::from_node(&nodes[0]);
+ move || {
+ let node_a = unsafe { &*node_ref.0 };
+ while Instant::now() < end_time {
+ node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+ // Ignore if we have any pending events, just always pretend we just got a
+ // PendingHTLCsForwardable
+ node_a.node.process_pending_htlc_forwards();
+ }
+ }
+ } } }
+ let mut threads = Vec::new();
+ for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); }
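+ // With the old, buggy behavior each of these threads could pick up the same retryable amount
+ // and send it concurrently, over-paying the HTLC; the loop below asserts that only one retry
+ // HTLC is ever in flight at a time.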
+
+ // Back in the main thread, poll pending messages and make sure that we never have more than
+ // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
+ // there are HTLC messages shoved in while it's running. This allows us to test that we never
+ // generate an additional update_add_htlc until we've fully failed the first.
+ let mut previously_failed_channels = Vec::new();
+ loop {
+ assert_eq!(send_msg_events.len(), 1);
+ let send_event = SendEvent::from_event(send_msg_events.pop().unwrap());
+ assert_eq!(send_event.msgs.len(), 1);
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+
+ // Note that we only push one route into `expect_find_route` at a time, because that's all
+ // the retries (should) need. If the bug is reintroduced "real" routes may be selected, but
+ // we should still ultimately fail for the same reason - because we're trying to send too
+ // many HTLCs at once.
+ let mut new_route_params = route_params.clone();
+ previously_failed_channels.push(route.paths[0].hops[1].short_channel_id);
+ new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
+ route.paths[0].hops[1].short_channel_id += 1;
+ nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
+
+ let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
+ // The "normal" commitment_signed_dance delivers the final RAA and then calls
+ // `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
+ // This races with our other threads which may generate an add-HTLCs commitment update via
+ // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
+ // *we've* called `process_pending_htlc_forwards` when it's guaranteed to have two updates.
+ let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true);
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &last_raa);
+
+ let cur_time = Instant::now();
+ if cur_time > end_time {
+ for thread in threads.drain(..) { thread.join().unwrap(); }
+ }
+
+ // Make sure we have some events to handle when we go around...
+ nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
+ nodes[0].node.process_pending_htlc_forwards();
+ send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
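+ // Two monitor updates are expected here: the one generated by handling the final RAA above and
+ // the one for the retry's add-HTLC commitment generated by `process_pending_htlc_forwards`.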
+ check_added_monitors!(nodes[0], 2);
+
+ if cur_time > end_time {
+ break;
+ }
+ }
+}
+
+fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
+ // Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
+ // receive the PaymentSent event even if the ChannelManager had no idea about the payment when
+ // it was last persisted.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let (persister_a, persister_b, persister_c);
+ let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
+ let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ let mut nodes_0_serialized = Vec::new();
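+ // Depending on the test case, snapshot the ChannelManager either before the payment exists or
+ // just after sending it, so the manager we later reload either has never heard of the payment
+ // or only knows it as pending.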
+ if !persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ if persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ nodes[1].node.claim_funds(our_payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
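+ // nodes[0] has now seen the preimage via the update_fulfill, but we stop short of completing
+ // the commitment dance and instead reload from the (possibly payment-unaware) serialization.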
+
+ // The ChannelMonitor should always be the latest version, as we're required to persist it
+ // during the commitment signed handling.
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
+ if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
+ // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
+ // the double-claim that would otherwise appear at the end of this test.
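+ // Reloading a stale ChannelManager alongside an up-to-date ChannelMonitor force-closes the
+ // channel (hence the `OutdatedChannelManager` closure reason above), and our latest commitment
+ // transaction ends up broadcast.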
+ nodes[0].node.timer_tick_occurred();
+ let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_broadcasted_txn.len(), 1);
+
+ // Ensure that, even after some time, we still include *something* in the current
+ // `ChannelManager` which prevents a `PaymentFailed` on restart, even if pending resolved
+ // payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
+ // A naive implementation of the fix here would wipe the pending payments set, causing a
+ // failure event when we restart.
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ // Ensure that we don't generate any further events even after the channel-closing commitment
+ // transaction is confirmed on-chain.
+ confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+}
+
+#[test]
+fn no_missing_sent_on_midpoint_reload() {
+ do_no_missing_sent_on_midpoint_reload(false);
+ do_no_missing_sent_on_midpoint_reload(true);
+}
+
+fn do_claim_from_closed_chan(fail_payment: bool) {
+ // Previously, LDK would refuse to claim a payment if a channel on which the payment was
+ // received had been closed between when the HTLC was received and when we went to claim it.
+ // This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
+ // presumably the sender may retry later. Long ago it also reduced total code in the claim
+ // pipeline.
+ //
+ // However, this doesn't make sense if you're trying to do an atomic swap or some other
+ // protocol that requires atomicity with some other action - if your money got claimed
+ // elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
+ // is an over-optimization - there should be a very, very low likelihood that a channel closes
+ // between when we receive the last HTLC for a payment and the user goes to claim the payment.
+ // Since we now have code to handle this anyway we should allow it.
+
+ // Build 4 nodes and send an MPP payment across two paths. By building the route manually we
+ // can set the CLTVs on the two paths to different values, resulting in different claim
+ // deadlines for the two HTLCs.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+ let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes(&nodes, 2, 3);
+
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+ let mut route_params = RouteParameters {
+ payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_features(nodes[1].node.invoice_features()),
+ final_value_msat: 10_000_000,
+ };
+ let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
+ None, &nodes[0].node.compute_inflight_htlcs()).unwrap();
+ // Make sure the route is ordered with the B->D path before the C->D path.
+ route.paths.sort_by(|a, _| if a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
+ std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater });
+
+ // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
+ // the HTLC is being relayed.
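+ // With deltas of +8 and +12 the B->D HTLC expires 4 blocks before the C->D HTLC, and
+ // `final_cltv` below tracks the earlier of the two expiries.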
+ route.paths[0].hops[1].cltv_expiry_delta = TEST_FINAL_CLTV + 8;
+ route.paths[1].hops[1].cltv_expiry_delta = TEST_FINAL_CLTV + 12;
+ let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1;
+
+ nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap();
+ check_added_monitors(&nodes[0], 2);
+ let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ send_msgs.sort_by(|a, _| {
+ let a_node_id =
+ if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() };
+ let node_b_id = nodes[1].node.get_our_node_id();
+ if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }
+ });
+
+ assert_eq!(send_msgs.len(), 2);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000,
+ payment_hash, Some(payment_secret), send_msgs.remove(0), false, None);
+ let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000,
+ payment_hash, Some(payment_secret), send_msgs.remove(0), true, None);
+
+ match receive_event.unwrap() {
+ Event::PaymentClaimable { claim_deadline, .. } => {
+ assert_eq!(claim_deadline.unwrap(), final_cltv - HTLC_FAIL_BACK_BUFFER);
+ },
+ _ => panic!(),
+ }
+
+ // Ensure that the claim_deadline is correct, with the payment failing at exactly the given
+ // height.
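+ // In the failure case we connect blocks right up to the deadline so the HTLCs start getting
+ // failed back; in the claim case we stop two blocks short so the payment remains claimable.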
+ connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
+ - if fail_payment { 0 } else { 2 });
+ if fail_payment {
+ // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
+ // and expire both immediately, though, by connecting another 4 blocks.
+ let reason = HTLCDestination::FailedPayment { payment_hash };
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]);
+ connect_blocks(&nodes[3], 4);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
+ pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
+ } else {
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+ check_closed_broadcast(&nodes[1], 1, true);
+ let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_tx.len(), 1);
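+ // nodes[1] force-closed the B<->D channel, so nodes[3] will claim its HTLC on that path
+ // on-chain (spending nodes[1]'s commitment transaction) while claiming the C<->D HTLC
+ // off-chain as normal.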
+
+ mine_transaction(&nodes[3], &bs_tx[0]);
+ check_added_monitors(&nodes[3], 1);
+ check_closed_broadcast(&nodes[3], 1, true);
+ check_closed_event(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false);
+
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[3], 2);
+ expect_payment_claimed!(nodes[3], payment_hash, 10_000_000);
+
+ let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(ds_tx.len(), 1);
+ check_spends!(&ds_tx[0], &bs_tx[0]);
+
+ mine_transactions(&nodes[1], &[&bs_tx[0], &ds_tx[0]]);
+ check_added_monitors(&nodes[1], 1);
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
+
+ let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors(&nodes[1], 1);
+ assert_eq!(bs_claims.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+ } else { panic!(); }
+
+ expect_payment_sent!(nodes[0], payment_preimage);
+
+ let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ assert_eq!(ds_claim_msgs.len(), 1);
+ let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] {
+ nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+ check_added_monitors(&nodes[2], 1);
+ commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+ expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+ cs_claim_msgs
+ } else { panic!(); };
+
+ assert_eq!(cs_claim_msgs.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
+ } else { panic!(); }
+
+ expect_payment_path_successful!(nodes[0]);
+ }
+}
+
+#[test]
+fn claim_from_closed_chan() {
+ do_claim_from_closed_chan(true);
+ do_claim_from_closed_chan(false);
+}
+
+fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
+ // Check that payment metadata received on one HTLC that doesn't match the metadata received
+ // on another results in the mismatching HTLC being rejected.
+ //
+ // We first set up a diamond-shaped network, allowing us to split a payment into two HTLCs, the
+ // first of which we'll deliver and the second of which we'll fail and then re-send with
+ // modified payment metadata, which will in turn result in it being failed by the recipient.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(config), Some(config), Some(config)]);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_0_deserialized;
+
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_id_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+ let chan_id_cd = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+ // Pay more than half of each channel's max, requiring MPP
+ let amt_msat = 750_000_000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3], Some(amt_msat));
+ let payment_id = PaymentId(payment_hash.0);
+ let payment_metadata = vec![44, 49, 52, 142];
+
+ let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_features(nodes[1].node.invoice_features());
+ let mut route_params = RouteParameters {
+ payment_params,
+ final_value_msat: amt_msat,
+ };
+
+ // Send the MPP payment, delivering the updated commitment state to nodes[1].
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields {
+ payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata),
+ }, payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_events.len(), 2);
+ let first_send = SendEvent::from_event(send_events.pop().unwrap());
+ let second_send = SendEvent::from_event(send_events.pop().unwrap());
+
+ let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() {
+ (&first_send, &second_send)
+ } else {
+ (&second_send, &first_send)
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors(&nodes[1], 1);
+ let b_forward_ev = SendEvent::from_node(&nodes[1]);
+ nodes[3].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]);
+ commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+
+ // Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which
+ // will result in nodes[2] failing the HTLC back.
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+ nodes[3].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true);
+
+ let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true);
+
+ let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(payment_fail_retryable_evs.len(), 2);
+ if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); }
+ if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); }
+
+ // Before we allow the HTLC to be retried, optionally change the payment_metadata we have
+ // stored for our payment.
+ if do_modify {
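+ // Swapping in empty metadata means the retried HTLC will carry different payment metadata
+ // than the HTLC nodes[3] is already holding, which nodes[3] should then reject.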
+ nodes[0].node.test_set_payment_metadata(payment_id, Some(Vec::new()));
+ }
+
+ // Optionally reload nodes[3] to check that the payment_metadata is properly serialized with
+ // the payment state.
+ if do_reload {
+ let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode();
+ let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode();
+ reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
+ persister, new_chain_monitor, nodes_0_deserialized);
+ nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+ reconnect_nodes(&nodes[1], &nodes[3], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+ reconnect_nodes(&nodes[2], &nodes[3], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // Create a new channel between C and D as A will refuse to retry on the existing one because
+ // it just failed.
+ let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+ // Now retry the failed HTLC.
+ nodes[0].node.process_pending_htlc_forwards();
+ check_added_monitors(&nodes[0], 1);
+ let as_resend = SendEvent::from_node(&nodes[0]);
+ nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resend.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors(&nodes[2], 1);
+ let cs_forward = SendEvent::from_node(&nodes[2]);
+ nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
+ commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);
+
+ // Finally, check that nodes[3] does the correct thing - either accepting the payment or, if
+ // the payment metadata was modified, failing only the one modified HTLC and retaining the
+ // other.
+ if do_modify {
+ expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ nodes[3].node.process_pending_htlc_forwards();
+ expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(),
+ &[HTLCDestination::FailedPayment {payment_hash}]);
+ nodes[3].node.process_pending_htlc_forwards();
+
+ check_added_monitors(&nodes[3], 1);
+ let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
+ expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
+ } else {
+ expect_pending_htlcs_forwardable!(nodes[3]);
+ expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
+ claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+ }
+}
+
+#[test]
+fn test_payment_metadata_consistency() {
+ do_test_payment_metadata_consistency(true, true);
+ do_test_payment_metadata_consistency(true, false);
+ do_test_payment_metadata_consistency(false, true);
+ do_test_payment_metadata_consistency(false, false);
+}