X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fpayment_tests.rs;h=8fc582745a8ed8ed2c327d867d59e5bdee727ab7;hb=ee9afd315d22151e314aff2ca826561569ac4d03;hp=c0c037fdba4b7508d651b33fbc87a82bccbb5e26;hpb=2d4bf974e5b588ce93cc8f1e133cda30ca1d91ee;p=rust-lightning

diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index c0c037fd..8fc58274 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -12,18 +12,18 @@
 //! payments thereafter.
 
 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
-use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
 use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
+use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields};
 use crate::ln::features::InvoiceFeatures;
 use crate::ln::msgs;
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::ln::outbound_payment::Retry;
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
-use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
+use crate::routing::router::{get_route, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters};
 use crate::routing::scoring::ChannelUsage;
 use crate::util::test_utils;
 use crate::util::errors::APIError;
@@ -102,7 +102,8 @@ fn mpp_retry() {
 	};
 
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
 	check_added_monitors!(nodes[0], 2); // one monitor per path
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -184,7 +185,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) {
 	route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
 
 	// Initiate the MPP payment.
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 2); // one monitor per path
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -256,8 +258,9 @@ fn no_pending_leak_on_initial_send_failure() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
-		true, APIError::ChannelUnavailable { ref err },
+	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
+			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
+		), true, APIError::ChannelUnavailable { ref err },
 		assert_eq!(err, "Peer for first hop currently disconnected"));
 
 	assert!(!nodes[0].node.has_pending_payments());
@@ -298,7 +301,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		payment_params: route.payment_params.clone().unwrap(),
 		final_value_msat: amt_msat,
 	};
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -335,9 +339,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
-	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(as_broadcasted_txn.len(), 1);
-	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	nodes[0].node.timer_tick_occurred();
+	if !confirm_before_reload {
+		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(as_broadcasted_txn.len(), 1);
+		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	} else {
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	}
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -433,8 +443,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		nodes[1].node.timer_tick_occurred();
 	}
 
-	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
-	nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	assert!(nodes[0].node.send_payment_with_route(&new_route, payment_hash, // Shouldn't be allowed to retry a fulfilled payment
+		RecipientOnionFields::secret_only(payment_secret), payment_id_1).is_err());
+	nodes[0].node.send_payment_with_route(&new_route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -500,9 +512,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	nodes[0].node.timer_tick_occurred();
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -568,7 +582,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
 	// confirming, we will fail as it's considered still-pending...
 	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
-	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+	match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
 		Err(PaymentSendFailure::DuplicatePayment) => {},
 		_ => panic!("Unexpected error")
 	}
@@ -586,7 +600,8 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes_0_serialized = nodes[0].node.encode();
 
 	// After the payment failed, we're free to send it again.
-	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
+	assert!(nodes[0].node.send_payment_with_route(&new_route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok());
 	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
@@ -596,12 +611,13 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 
 	// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
 	// the payment is not (spuriously) listed as still pending.
-	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
+	assert!(nodes[0].node.send_payment_with_route(&new_route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok());
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
 
-	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+	match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
 		Err(PaymentSendFailure::DuplicatePayment) => {},
 		_ => panic!("Unexpected error")
 	}
@@ -618,7 +634,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
+	match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
 		Err(PaymentSendFailure::DuplicatePayment) => {},
 		_ => panic!("Unexpected error")
 	}
@@ -847,7 +863,8 @@ fn get_ldk_payment_preimage() {
 		&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
 		Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), amt_msat,
 		TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
 	// Make sure to use `get_payment_preimage`
@@ -1061,7 +1078,8 @@ fn claimed_send_payment_idempotent() {
 		() => {
 			// If we try to resend a new payment with a different payment_hash but with the same
 			// payment_id, it should be rejected.
-			let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
+			let send_result = nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+				RecipientOnionFields::secret_only(second_payment_secret), payment_id);
 			match send_result {
 				Err(PaymentSendFailure::DuplicatePayment) => {},
 				_ => panic!("Unexpected send result: {:?}", send_result),
@@ -1069,7 +1087,8 @@ fn claimed_send_payment_idempotent() {
 
 			// Further, if we try to send a spontaneous payment with the same payment_id it should
 			// also be rejected.
-			let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
+			let send_result = nodes[0].node.send_spontaneous_payment(
+				&route, None, RecipientOnionFields::spontaneous_empty(), payment_id);
 			match send_result {
 				Err(PaymentSendFailure::DuplicatePayment) => {},
 				_ => panic!("Unexpected send result: {:?}", send_result),
@@ -1109,7 +1128,8 @@ fn claimed_send_payment_idempotent() {
 		nodes[0].node.timer_tick_occurred();
 	}
 
-	nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
+	nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+		RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
@@ -1133,7 +1153,8 @@ fn abandoned_send_payment_idempotent() {
 		() => {
 			// If we try to resend a new payment with a different payment_hash but with the same
 			// payment_id, it should be rejected.
-			let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
+			let send_result = nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+				RecipientOnionFields::secret_only(second_payment_secret), payment_id);
 			match send_result {
 				Err(PaymentSendFailure::DuplicatePayment) => {},
 				_ => panic!("Unexpected send result: {:?}", send_result),
@@ -1141,7 +1162,8 @@ fn abandoned_send_payment_idempotent() {
 
 			// Further, if we try to send a spontaneous payment with the same payment_id it should
 			// also be rejected.
-			let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
+			let send_result = nodes[0].node.send_spontaneous_payment(
+				&route, None, RecipientOnionFields::spontaneous_empty(), payment_id);
 			match send_result {
 				Err(PaymentSendFailure::DuplicatePayment) => {},
 				_ => panic!("Unexpected send result: {:?}", send_result),
@@ -1165,7 +1187,8 @@ fn abandoned_send_payment_idempotent() {
 
 	// However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
 	// failed payment back.
-	nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
+	nodes[0].node.send_payment_with_route(&route, second_payment_hash,
+		RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
 	claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
@@ -1314,9 +1337,11 @@ fn test_holding_cell_inflight_htlcs() {
 	// Queue up two payments - one will be delivered right away, one immediately goes into the
 	// holding cell as nodes[0] is AwaitingRAA.
 	{
-		nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
 		check_added_monitors!(nodes[0], 1);
-		nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 		check_added_monitors!(nodes[0], 0);
 	}
 
@@ -1396,7 +1421,8 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	).unwrap();
 
 	let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
+	nodes[0].node.send_payment_with_route(&route, payment_hash,
+		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
 	let payment_event = {
 		{
 			let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
@@ -1631,7 +1657,8 @@ fn do_automatic_retries(test: AutoRetry) {
 
 	if test == AutoRetry::Success {
 		// Test that we can succeed on the first retry.
-		nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+		nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+			PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 		pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 		// Open a new channel with liquidity on the second hop so we can find a route for the retry
@@ -1646,7 +1673,9 @@ fn do_automatic_retries(test: AutoRetry) {
 		pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
 		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
 	} else if test == AutoRetry::Spontaneous {
-		nodes[0].node.send_spontaneous_payment_with_retry(Some(payment_preimage), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+		nodes[0].node.send_spontaneous_payment_with_retry(Some(payment_preimage),
+			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params,
+			Retry::Attempts(1)).unwrap();
 		pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 		// Open a new channel with liquidity on the second hop so we can find a route for the retry
@@ -1662,7 +1691,8 @@ fn do_automatic_retries(test: AutoRetry) {
 		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
 	} else if test == AutoRetry::FailAttempts {
 		// Ensure ChannelManager will not retry a payment if it has run out of payment attempts.
-		nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+		nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+			PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 		pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 		// Open a new channel with no liquidity on the second hop so we can find a (bad) route for
@@ -1680,7 +1710,8 @@ fn do_automatic_retries(test: AutoRetry) {
 	} else if test == AutoRetry::FailTimeout {
 		#[cfg(not(feature = "no-std"))] {
 			// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
-			nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
+			nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+				PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
 			pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 			// Advance the time so the second attempt fails due to timeout.
@@ -1704,7 +1735,8 @@ fn do_automatic_retries(test: AutoRetry) {
 	} else if test == AutoRetry::FailOnRestart {
 		// Ensure ChannelManager will not retry a payment after restart, even if there were retry
 		// attempts remaining prior to restart.
-		nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
+		nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+			PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
 		pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 		// Open a new channel with no liquidity on the second hop so we can find a (bad) route for
@@ -1736,7 +1768,8 @@ fn do_automatic_retries(test: AutoRetry) {
 			_ => panic!("Unexpected event"),
 		}
 	} else if test == AutoRetry::FailOnRetry {
-		nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+		nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+			PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 		pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
 
 		// We retry payments in `process_pending_htlc_forwards`. Since our channel closed, we should
@@ -1867,7 +1900,8 @@ fn auto_retry_partial_failure() {
 	}, Ok(retry_2_route));
 
 	// Send a payment that will partially fail on send, then partially fail on retry, then succeed.
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
 	let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(closed_chan_events.len(), 4);
 	match closed_chan_events[0] {
@@ -2000,7 +2034,8 @@ fn auto_retry_zero_attempts_send_error() {
 	};
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
 	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 3);
@@ -2038,7 +2073,8 @@ fn fails_paying_after_rejected_by_payee() {
 		final_value_msat: amt_msat,
 	};
 
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -2129,7 +2165,8 @@ fn retry_multi_path_single_failed_payment() {
 		scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
 	}
 
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
@@ -2203,7 +2240,8 @@ fn immediate_retry_on_failure() {
 		payment_params: pay_params,
 		final_value_msat: amt_msat,
 	}, Ok(route.clone()));
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
@@ -2311,7 +2349,8 @@ fn no_extra_retries_on_back_to_back_fail() {
 		final_value_msat: amt_msat,
 	}, Ok(route.clone()));
 
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	let htlc_updates = SendEvent::from_node(&nodes[0]);
 	check_added_monitors!(nodes[0], 1);
 	assert_eq!(htlc_updates.msgs.len(), 1);
@@ -2510,7 +2549,8 @@ fn test_simple_partial_retry() {
 		final_value_msat: amt_msat / 2,
 	}, Ok(route.clone()));
 
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	let htlc_updates = SendEvent::from_node(&nodes[0]);
 	check_added_monitors!(nodes[0], 1);
 	assert_eq!(htlc_updates.msgs.len(), 1);
@@ -2667,7 +2707,8 @@ fn test_threaded_payment_retries() {
 	};
 
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
-	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
 	check_added_monitors!(nodes[0], 2);
 	let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(send_msg_events.len(), 2);
@@ -2795,6 +2836,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
 	if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
 	// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
 	// the double-claim that would otherwise appear at the end of this test.
+	nodes[0].node.timer_tick_occurred();
 
 	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(as_broadcasted_txn.len(), 1);
@@ -2829,3 +2871,145 @@ fn no_missing_sent_on_midpoint_reload() {
 	do_no_missing_sent_on_midpoint_reload(false);
 	do_no_missing_sent_on_midpoint_reload(true);
 }
+
+fn do_claim_from_closed_chan(fail_payment: bool) {
+	// Previously, LDK would refuse to claim a payment if a channel on which the payment was
+	// received had been closed between when the HTLC was received and when we went to claim it.
+	// This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
+	// presumably the sender may retry later. Long ago it also reduced total code in the claim
+	// pipeline.
+	//
+	// However, this doesn't make sense if you're trying to do an atomic swap or some other
+	// protocol that requires atomicity with some other action - if your money got claimed
+	// elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
+	// is an over-optimization - there should be a very, very low likelihood that a channel closes
+	// between when we receive the last HTLC for a payment and the user goes to claim the payment.
+	// Since we now have code to handle this anyway we should allow it.
+
+	// Build 4 nodes and send an MPP payment across two paths. By building a route manually set the
+	// CLTVs on the paths to different value resulting in a different claim deadline.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0);
+	let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2;
+	create_announced_chan_between_nodes(&nodes, 2, 3);
+
+	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]);
+	let mut route_params = RouteParameters {
+		payment_params: PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+			.with_features(nodes[1].node.invoice_features()),
+		final_value_msat: 10_000_000,
+	};
+	let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params,
+		None, &nodes[0].node.compute_inflight_htlcs()).unwrap();
+	// Make sure the route is ordered as the B->D path before C->D
+	route.paths.sort_by(|a, _| if a[0].pubkey == nodes[1].node.get_our_node_id() {
+		std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater });
+
+	// Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
+	// the HTLC is being relayed.
+	route.paths[0][1].cltv_expiry_delta = TEST_FINAL_CLTV + 8;
+	route.paths[1][1].cltv_expiry_delta = TEST_FINAL_CLTV + 12;
+	let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1;
+
+	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
+	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
+		PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap();
+	check_added_monitors(&nodes[0], 2);
+	let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events();
+	send_msgs.sort_by(|a, _| {
+		let a_node_id =
+			if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() };
+		let node_b_id = nodes[1].node.get_our_node_id();
+		if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }
+	});
+
+	assert_eq!(send_msgs.len(), 2);
+	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), false, None);
+	let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000,
+		payment_hash, Some(payment_secret), send_msgs.remove(0), true, None);
+
+	match receive_event.unwrap() {
+		Event::PaymentClaimable { claim_deadline, .. } => {
+			assert_eq!(claim_deadline.unwrap(), final_cltv - HTLC_FAIL_BACK_BUFFER);
+		},
+		_ => panic!(),
+	}
+
+	// Ensure that the claim_deadline is correct, with the payment failing at exactly the given
+	// height.
+	connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1
+		- if fail_payment { 0 } else { 2 });
+	if fail_payment {
+		// We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
+		// and expire both immediately, though, by connecting another 4 blocks.
+		let reason = HTLCDestination::FailedPayment { payment_hash };
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]);
+		connect_blocks(&nodes[3], 4);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]);
+		pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
+	} else {
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id()).unwrap();
+		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false);
+		check_closed_broadcast(&nodes[1], 1, true);
+		let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(bs_tx.len(), 1);
+
+		mine_transaction(&nodes[3], &bs_tx[0]);
+		check_added_monitors(&nodes[3], 1);
+		check_closed_broadcast(&nodes[3], 1, true);
+		check_closed_event(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false);
+
+		nodes[3].node.claim_funds(payment_preimage);
+		check_added_monitors(&nodes[3], 2);
+		expect_payment_claimed!(nodes[3], payment_hash, 10_000_000);
+
+		let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(ds_tx.len(), 1);
+		check_spends!(&ds_tx[0], &bs_tx[0]);
+
+		mine_transactions(&nodes[1], &[&bs_tx[0], &ds_tx[0]]);
+		check_added_monitors(&nodes[1], 1);
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, true);
+
+		let bs_claims = nodes[1].node.get_and_clear_pending_msg_events();
+		check_added_monitors(&nodes[1], 1);
+		assert_eq!(bs_claims.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_sent!(nodes[0], payment_preimage);
+
+		let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+		assert_eq!(ds_claim_msgs.len(), 1);
+		let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] {
+			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+			check_added_monitors(&nodes[2], 1);
+			commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
+			cs_claim_msgs
+		} else { panic!(); };
+
+		assert_eq!(cs_claim_msgs.len(), 1);
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] {
+			nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true);
+		} else { panic!(); }
+
+		expect_payment_path_successful!(nodes[0]);
+	}
+}
+
+#[test]
+fn claim_from_closed_chan() {
+	do_claim_from_closed_chan(true);
+	do_claim_from_closed_chan(false);
+}