X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpayment_tests.rs;h=51811ec632943efb373031bc87eacab3feecfb69;hb=29b9eb3936fa9de3cafffe05506e537d44d2a862;hp=412e7f774f92c24e9ae3ccb85c8c356f91f8e1e4;hpb=1016e1f605ff03ed14ed875e7cd4f567ae15c96a;p=rust-lightning

diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 412e7f77..51811ec6 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -12,10 +12,10 @@
 //! payments thereafter.
 
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
-use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason};
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields};
 use crate::ln::features::InvoiceFeatures;
@@ -23,7 +23,7 @@ use crate::ln::msgs;
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::ln::outbound_payment::Retry;
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
-use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
+use crate::routing::router::{get_route, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters};
 use crate::routing::scoring::ChannelUsage;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
@@ -343,7 +343,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	if !confirm_before_reload {
 		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(as_broadcasted_txn.len(), 1);
-		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+		assert_eq!(as_broadcasted_txn[0].txid(), as_commitment_tx.txid());
 	} else {
 		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 	}
@@ -406,9 +406,11 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
 	expect_payment_sent!(nodes[0], payment_preimage_1);
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
-	let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(as_htlc_timeout_txn.len(), 2);
-	let (first_htlc_timeout_tx, second_htlc_timeout_tx) = (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1]);
+	let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
+		let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
+		assert_eq!(txn.len(), 2);
+		(txn.remove(0), txn.remove(0))
+	};
 	check_spends!(first_htlc_timeout_tx, as_commitment_tx);
 	check_spends!(second_htlc_timeout_tx, as_commitment_tx);
 	if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
@@ -682,7 +684,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(node_txn.len(), 3);
-	assert_eq!(node_txn[0], node_txn[1]);
+	assert_eq!(node_txn[0].txid(), node_txn[1].txid());
 	check_spends!(node_txn[1], funding_tx);
 	check_spends!(node_txn[2], node_txn[1]);
 	let timeout_txn = vec![node_txn[2].clone()];
@@ -1183,7 +1185,7 @@ fn abandoned_send_payment_idempotent() {
 	}
 	check_send_rejected!();
 
-	pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
+	pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash, PaymentFailureReason::RecipientRejected);
 
 	// However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
 	// failed payment back.
@@ -1725,9 +1727,10 @@ fn do_automatic_retries(test: AutoRetry) {
 		let mut events = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 1);
 		match events[0] {
-			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => {
 				assert_eq!(payment_hash, *ev_payment_hash);
 				assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+				assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap());
 			},
 			_ => panic!("Unexpected event"),
 		}
@@ -1761,9 +1764,10 @@ fn do_automatic_retries(test: AutoRetry) {
 		let mut events = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 1);
 		match events[0] {
-			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => {
 				assert_eq!(payment_hash, *ev_payment_hash);
 				assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+				assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap());
 			},
 			_ => panic!("Unexpected event"),
 		}
@@ -1781,9 +1785,10 @@ fn do_automatic_retries(test: AutoRetry) {
 		let mut events = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 1);
 		match events[0] {
-			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+			Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => {
 				assert_eq!(payment_hash, *ev_payment_hash);
 				assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+				assert_eq!(PaymentFailureReason::RouteNotFound, ev_reason.unwrap());
 			},
 			_ => panic!("Unexpected event"),
 		}
@@ -2087,7 +2092,7 @@ fn fails_paying_after_rejected_by_payee() {
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]);
-	pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash);
+	pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
 }
 
 #[test]
@@ -2463,9 +2468,10 @@ fn no_extra_retries_on_back_to_back_fail() {
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
-		Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
+		Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => {
 			assert_eq!(payment_hash, *ev_payment_hash);
 			assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
+			assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap());
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -2871,3 +2877,293 @@ fn no_missing_sent_on_midpoint_reload() {
 	do_no_missing_sent_on_midpoint_reload(false);
 	do_no_missing_sent_on_midpoint_reload(true);
 }
+
+fn do_claim_from_closed_chan(fail_payment: bool) {
+	// Previously, LDK would refuse to claim a payment if a channel on which the payment was
+	// received had been closed between when the HTLC was received and when we went to claim it.
+	// This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
+	// presumably the sender may retry later. Long ago it also reduced total code in the claim
+	// pipeline.
+	//
+	// However, this doesn't make sense if you're trying to do an atomic swap or some other
+	// protocol that requires atomicity with some other action - if your money got claimed
+	// elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
+	// is an over-optimization - there should be a very, very low likelihood that a channel closes
+	// between when we receive the last HTLC for a payment and the user goes to claim the payment.
+	// Since we now have code to handle this anyway we should allow it.
+
+	// Build 4 nodes and send an MPP payment across two paths. By building a route manually set the
+	// CLTVs on the paths to different value resulting in a different claim deadline.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+	let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+	create_announced_chan_between_nodes(&nodes, 2, 3);
+
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+	let mut route_params = RouteParameters {
+		payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+			.with_features(nodes[1].node.invoice_features()),
+		final_value_msat: 10_000_000,
+	};
+	let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
+		None, &nodes[0].node.compute_inflight_htlcs()).unwrap();
+	// Make sure the route is ordered as the B->D path before C->D
+	route.paths.sort_by(|a, _| if a[0].pubkey == nodes[1].node.get_our_node_id() {
+		std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater });
+
+	// Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
+	// the HTLC is being relayed.
+	route.paths[0][1].cltv_expiry_delta = TEST_FINAL_CLTV + 8;
+	route.paths[1][1].cltv_expiry_delta = TEST_FINAL_CLTV + 12;
+	let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1;
+
+	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap();
+	check_added_monitors(&nodes[0], 2);
+	let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+	send_msgs.sort_by(|a, _| {
+		let a_node_id =
+			if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() };
+		let node_b_id = nodes[1].node.get_our_node_id();
+		if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }
+	});
+
+	assert_eq!(send_msgs.len(), 2);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), false, None);
+	let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), true, None);
+
+	match receive_event.unwrap() {
+		Event::PaymentClaimable { claim_deadline, .. } => {
+			assert_eq!(claim_deadline.unwrap(), final_cltv - HTLC_FAIL_BACK_BUFFER);
+		},
+		_ => panic!(),
+	}
+
+	// Ensure that the claim_deadline is correct, with the payment failing at exactly the given
+	// height.
+	connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
+		- if fail_payment { 0 } else { 2 });
+	if fail_payment {
+		// We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
+		// and expire both immediately, though, by connecting another 4 blocks.
+		let reason = HTLCDestination::FailedPayment { payment_hash };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]);
+		connect_blocks(&nodes[3], 4);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
+		pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected);
+	} else {
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+		check_closed_broadcast(&nodes[1], 1, true);
+		let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(bs_tx.len(), 1);
+
+		mine_transaction(&nodes[3], &bs_tx[0]);
+		check_added_monitors(&nodes[3], 1);
+		check_closed_broadcast(&nodes[3], 1, true);
+		check_closed_event(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false);
+
+		nodes[3].node.claim_funds(payment_preimage);
+		check_added_monitors(&nodes[3], 2);
+		expect_payment_claimed!(nodes[3], payment_hash, 10_000_000);
+
+		let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(ds_tx.len(), 1);
+		check_spends!(&ds_tx[0], &bs_tx[0]);
+
+		mine_transactions(&nodes[1], &[&bs_tx[0], &ds_tx[0]]);
+		check_added_monitors(&nodes[1], 1);
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
+
+		let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+		check_added_monitors(&nodes[1], 1);
+		assert_eq!(bs_claims.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_sent!(nodes[0], payment_preimage);
+
+		let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+		assert_eq!(ds_claim_msgs.len(), 1);
+		let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] {
+			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+			check_added_monitors(&nodes[2], 1);
+			commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+			cs_claim_msgs
+		} else { panic!(); };
+
+		assert_eq!(cs_claim_msgs.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_path_successful!(nodes[0]);
+	}
+}
+
+#[test]
+fn claim_from_closed_chan() {
+	do_claim_from_closed_chan(true);
+	do_claim_from_closed_chan(false);
+}
+
+fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
+	// Check that a payment metadata received on one HTLC that doesn't match the one received on
+	// another results in the HTLC being rejected.
+	//
+	// We first set up a diamond shaped network, allowing us to split a payment into two HTLCs, the
+	// first of which we'll deliver and the second of which we'll fail and then re-send with
+	// modified payment metadata, which will in turn result in it being failed by the recipient.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let mut config = test_default_channel_config();
+	config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50;
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(config), Some(config), Some(config)]);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_0_deserialized;
+
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+	let chan_id_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+	let chan_id_cd = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+	// Pay more than half of each channel's max, requiring MPP
+	let amt_msat = 750_000_000;
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3], Some(amt_msat));
+	let payment_id = PaymentId(payment_hash.0);
+	let payment_metadata = vec![44, 49, 52, 142];
+
+	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+		.with_features(nodes[1].node.invoice_features());
+	let mut route_params = RouteParameters {
+		payment_params,
+		final_value_msat: amt_msat,
+	};
+
+	// Send the MPP payment, delivering the updated commitment state to nodes[1].
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields {
+			payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata),
+		}, payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
+	check_added_monitors!(nodes[0], 2);
+
+	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(send_events.len(), 2);
+	let first_send = SendEvent::from_event(send_events.pop().unwrap());
+	let second_send = SendEvent::from_event(send_events.pop().unwrap());
+
+	let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() {
+		(&first_send, &second_send)
+	} else {
+		(&second_send, &first_send)
+	};
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true);
+
+	expect_pending_htlcs_forwardable!(nodes[1]);
+	check_added_monitors(&nodes[1], 1);
+	let b_forward_ev = SendEvent::from_node(&nodes[1]);
+	nodes[3].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]);
+	commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true);
+
+	expect_pending_htlcs_forwardable!(nodes[3]);
+
+	// Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which
+	// will result in nodes[2] failing the HTLC back.
+	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+	nodes[3].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+
+	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]);
+	commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true);
+
+	let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true);
+
+	let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(payment_fail_retryable_evs.len(), 2);
+	if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); }
+	if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); }
+
+	// Before we allow the HTLC to be retried, optionally change the payment_metadata we have
+	// stored for our payment.
+	if do_modify {
+		nodes[0].node.test_set_payment_metadata(payment_id, Some(Vec::new()));
+	}
+
+	// Optionally reload nodes[3] to check that the payment_metadata is properly serialized with
+	// the payment state.
+	if do_reload {
+		let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode();
+		let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode();
+		reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
+			persister, new_chain_monitor, nodes_0_deserialized);
+		nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
+		reconnect_nodes(&nodes[1], &nodes[3], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	}
+	reconnect_nodes(&nodes[2], &nodes[3], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// Create a new channel between C and D as A will refuse to retry on the existing one because
+	// it just failed.
+	let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+
+	// Now retry the failed HTLC.
+	nodes[0].node.process_pending_htlc_forwards();
+	check_added_monitors(&nodes[0], 1);
+	let as_resend = SendEvent::from_node(&nodes[0]);
+	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resend.msgs[0]);
+	commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true);
+
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors(&nodes[2], 1);
+	let cs_forward = SendEvent::from_node(&nodes[2]);
+	nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
+	commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);
+
+	// Finally, check that nodes[3] does the correct thing - either accepting the payment or, if
+	// the payment metadata was modified, failing only the one modified HTLC and retaining the
+	// other.
+	if do_modify {
+		expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+		nodes[3].node.process_pending_htlc_forwards();
+		expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(),
+			&[HTLCDestination::FailedPayment {payment_hash}]);
+		nodes[3].node.process_pending_htlc_forwards();
+
+		check_added_monitors(&nodes[3], 1);
+		let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id());
+
+		nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
+		expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
+			&[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
+	} else {
+		expect_pending_htlcs_forwardable!(nodes[3]);
+		expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
+		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+	}
+}
+
+#[test]
+fn test_payment_metadata_consistency() {
+	do_test_payment_metadata_consistency(true, true);
+	do_test_payment_metadata_consistency(true, false);
+	do_test_payment_metadata_consistency(false, true);
+	do_test_payment_metadata_consistency(false, false);
+}