+
+fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
+ // Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
+ // receive the PaymentSent event even if the ChannelManager had no idea about the payment when
+ // it was last persisted.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ // Reload-state slots, declared before `nodes` so they outlive it (one set per reload below).
+ let (persister_a, persister_b, persister_c);
+ let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
+ let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ // Snapshot the ChannelManager either before the payment exists at all...
+ let mut nodes_0_serialized = Vec::new();
+ if !persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ // ...or after the payment is pending but before the claim below, per the test parameter.
+ if persist_manager_with_payment {
+ nodes_0_serialized = nodes[0].node.encode();
+ }
+
+ nodes[1].node.claim_funds(our_payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+ expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+ // Deliver the fulfill and the commitment_signed to nodes[0], then reload before the dance
+ // completes (no RAA is exchanged here).
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+
+ // The ChannelMonitor should always be the latest version, as we're required to persist it
+ // during the commitment signed handling.
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
+
+ // The stale manager forces a channel close, and we should still see a PaymentSent carrying
+ // the correct preimage even if the serialized manager never knew of the payment.
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
+ if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
+ // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
+ // the double-claim that would otherwise appear at the end of this test.
+ nodes[0].node.timer_tick_occurred();
+ let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_broadcasted_txn.len(), 1);
+
+ // Ensure that, even after some time, if we restart we still include *something* in the current
+ // `ChannelManager` which prevents a `PaymentFailed` when we restart even if pending resolved
+ // payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
+ // A naive implementation of the fix here would wipe the pending payments set, causing a
+ // failure event when we restart.
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ // Ensure that we don't generate any further events even after the channel-closing commitment
+ // transaction is confirmed on-chain.
+ confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
+ for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+
+ // A third reload, after on-chain confirmation, should likewise generate no events.
+ let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+ reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert!(events.is_empty());
+}
+
+#[test]
+fn no_missing_sent_on_midpoint_reload() {
+ // Exercise the mid-dance reload both without and with the pending payment present in the
+ // serialized ChannelManager.
+ let cases = [false, true];
+ for &persist_manager_with_payment in cases.iter() {
+ do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment);
+ }
+}
+
+fn do_claim_from_closed_chan(fail_payment: bool) {
+ // Previously, LDK would refuse to claim a payment if a channel on which the payment was
+ // received had been closed between when the HTLC was received and when we went to claim it.
+ // This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
+ // presumably the sender may retry later. Long ago it also reduced total code in the claim
+ // pipeline.
+ //
+ // However, this doesn't make sense if you're trying to do an atomic swap or some other
+ // protocol that requires atomicity with some other action - if your money got claimed
+ // elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
+ // is an over-optimization - there should be a very, very low likelihood that a channel closes
+ // between when we receive the last HTLC for a payment and the user goes to claim the payment.
+ // Since we now have code to handle this anyway we should allow it.
+
+ // Build 4 nodes and send an MPP payment across two paths. By building a route manually set the
+ // CLTVs on the paths to different value resulting in a different claim deadline.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+ let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes(&nodes, 2, 3);
+
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+ // `route_params` is never mutated below, so it need not be `mut` (this previously tripped
+ // the `unused_mut` lint).
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_bolt11_features(nodes[1].node.invoice_features()).unwrap(),
+ final_value_msat: 10_000_000,
+ };
+ let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
+ None, &nodes[0].node.compute_inflight_htlcs()).unwrap();
+ // Make sure the route is ordered as the B->D path before C->D
+ route.paths.sort_by(|a, _| if a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
+ std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater });
+
+ // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
+ // the HTLC is being relayed.
+ route.paths[0].hops[1].cltv_expiry_delta = TEST_FINAL_CLTV + 8;
+ route.paths[1].hops[1].cltv_expiry_delta = TEST_FINAL_CLTV + 12;
+ let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1;
+
+ nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+ PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap();
+ check_added_monitors(&nodes[0], 2);
+ // Sort the send events so the B->D path's messages come first, matching the path order above.
+ let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ send_msgs.sort_by(|a, _| {
+ let a_node_id =
+ if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() };
+ let node_b_id = nodes[1].node.get_our_node_id();
+ if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }
+ });
+
+ assert_eq!(send_msgs.len(), 2);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000,
+ payment_hash, Some(payment_secret), send_msgs.remove(0), false, None);
+ let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000,
+ payment_hash, Some(payment_secret), send_msgs.remove(0), true, None);
+
+ match receive_event.unwrap() {
+ Event::PaymentClaimable { claim_deadline, .. } => {
+ assert_eq!(claim_deadline.unwrap(), final_cltv - HTLC_FAIL_BACK_BUFFER);
+ },
+ _ => panic!(),
+ }
+
+ // Ensure that the claim_deadline is correct, with the payment failing at exactly the given
+ // height.
+ connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
+ - if fail_payment { 0 } else { 2 });
+ if fail_payment {
+ // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
+ // and expire both immediately, though, by connecting another 4 blocks.
+ let reason = HTLCDestination::FailedPayment { payment_hash };
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]);
+ connect_blocks(&nodes[3], 4);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
+ pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
+ } else {
+ // Close the B<->D channel before D claims, then claim the B-D HTLC on-chain while the
+ // C-D HTLC is claimed over the still-open channel.
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+ check_closed_broadcast(&nodes[1], 1, true);
+ let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_tx.len(), 1);
+
+ mine_transaction(&nodes[3], &bs_tx[0]);
+ check_added_monitors(&nodes[3], 1);
+ check_closed_broadcast(&nodes[3], 1, true);
+ check_closed_event(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false);
+
+ nodes[3].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[3], 2);
+ expect_payment_claimed!(nodes[3], payment_hash, 10_000_000);
+
+ // D's broadcast claim of the B-D HTLC must spend B's commitment transaction.
+ let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(ds_tx.len(), 1);
+ check_spends!(&ds_tx[0], &bs_tx[0]);
+
+ mine_transactions(&nodes[1], &[&bs_tx[0], &ds_tx[0]]);
+ check_added_monitors(&nodes[1], 1);
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
+
+ // B learns the preimage on-chain and claims the upstream HTLC from A.
+ let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors(&nodes[1], 1);
+ assert_eq!(bs_claims.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+ } else { panic!(); }
+
+ expect_payment_sent!(nodes[0], payment_preimage);
+
+ // Now relay the C->D path's off-chain claim back to A as well.
+ let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+ assert_eq!(ds_claim_msgs.len(), 1);
+ let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] {
+ nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+ check_added_monitors(&nodes[2], 1);
+ commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+ expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+ cs_claim_msgs
+ } else { panic!(); };
+
+ assert_eq!(cs_claim_msgs.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
+ } else { panic!(); }
+
+ expect_payment_path_successful!(nodes[0]);
+ }
+}
+
+#[test]
+fn claim_from_closed_chan() {
+ // Exercise both the payment-failure path and the claim-after-close path.
+ let cases = [true, false];
+ for &fail_payment in cases.iter() {
+ do_claim_from_closed_chan(fail_payment);
+ }
+}
+
+fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
+ // Check that a payment metadata received on one HTLC that doesn't match the one received on
+ // another results in the HTLC being rejected.
+ //
+ // We first set up a diamond shaped network, allowing us to split a payment into two HTLCs, the
+ // first of which we'll deliver and the second of which we'll fail and then re-send with
+ // modified payment metadata, which will in turn result in it being failed by the recipient.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(config), Some(config), Some(config)]);
+
+ // Reload state for the optional nodes[3] restart below, declared before `nodes` so it
+ // outlives it. Renamed from `nodes_0_deserialized`: it holds nodes[3]'s deserialized manager.
+ let persister;
+ let new_chain_monitor;
+ let nodes_3_deserialized;
+
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_id_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+ let chan_id_cd = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+ // Pay more than half of each channel's max, requiring MPP
+ let amt_msat = 750_000_000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3], Some(amt_msat));
+ let payment_id = PaymentId(payment_hash.0);
+ let payment_metadata = vec![44, 49, 52, 142];
+
+ let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_bolt11_features(nodes[1].node.invoice_features()).unwrap();
+ // `route_params` is never mutated, so `mut` was unnecessary (silences `unused_mut`).
+ let route_params = RouteParameters {
+ payment_params,
+ final_value_msat: amt_msat,
+ };
+
+ // Send the MPP payment, delivering the updated commitment state to nodes[1].
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields {
+ payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata),
+ }, payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(send_events.len(), 2);
+ let first_send = SendEvent::from_event(send_events.pop().unwrap());
+ let second_send = SendEvent::from_event(send_events.pop().unwrap());
+
+ // Work out which send event targets nodes[1] (B) and which targets nodes[2] (C).
+ let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() {
+ (&first_send, &second_send)
+ } else {
+ (&second_send, &first_send)
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors(&nodes[1], 1);
+ let b_forward_ev = SendEvent::from_node(&nodes[1]);
+ nodes[3].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]);
+ commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+
+ // Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which
+ // will result in nodes[2] failing the HTLC back.
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+ nodes[3].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true);
+
+ let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true);
+
+ let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(payment_fail_retryable_evs.len(), 2);
+ if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); }
+ if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); }
+
+ // Before we allow the HTLC to be retried, optionally change the payment_metadata we have
+ // stored for our payment.
+ if do_modify {
+ nodes[0].node.test_set_payment_metadata(payment_id, Some(Vec::new()));
+ }
+
+ // Optionally reload nodes[3] to check that the payment_metadata is properly serialized with
+ // the payment state.
+ if do_reload {
+ let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode();
+ let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode();
+ reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
+ persister, new_chain_monitor, nodes_3_deserialized);
+ nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+ reconnect_nodes(&nodes[1], &nodes[3], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+ reconnect_nodes(&nodes[2], &nodes[3], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // Create a new channel between C and D as A will refuse to retry on the existing one because
+ // it just failed.
+ let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+ // Now retry the failed HTLC.
+ nodes[0].node.process_pending_htlc_forwards();
+ check_added_monitors(&nodes[0], 1);
+ let as_resend = SendEvent::from_node(&nodes[0]);
+ nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resend.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors(&nodes[2], 1);
+ let cs_forward = SendEvent::from_node(&nodes[2]);
+ nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
+ commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);
+
+ // Finally, check that nodes[3] does the correct thing - either accepting the payment or, if
+ // the payment metadata was modified, failing only the one modified HTLC and retaining the
+ // other.
+ if do_modify {
+ expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ nodes[3].node.process_pending_htlc_forwards();
+ expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(),
+ &[HTLCDestination::FailedPayment {payment_hash}]);
+ nodes[3].node.process_pending_htlc_forwards();
+
+ check_added_monitors(&nodes[3], 1);
+ let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
+ expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
+ } else {
+ expect_pending_htlcs_forwardable!(nodes[3]);
+ expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
+ claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+ }
+}
+
+#[test]
+fn test_payment_metadata_consistency() {
+ // Cover every combination of reloading the recipient and modifying the sender's metadata,
+ // in the same order as before: (t,t), (t,f), (f,t), (f,f).
+ let cases = [true, false];
+ for &do_reload in cases.iter() {
+ for &do_modify in cases.iter() {
+ do_test_payment_metadata_consistency(do_reload, do_modify);
+ }
+ }
+}