diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 3285cf4d..1ce0cc03 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -15,6 +15,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::keysinterface::EntropySource;
 use crate::chain::transaction::OutPoint;
+use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
 use crate::ln::features::InvoiceFeatures;
@@ -24,10 +25,10 @@ use crate::ln::outbound_payment::Retry;
 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
 use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
 use crate::routing::scoring::ChannelUsage;
-use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
 use crate::util::test_utils;
 use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
+use crate::util::string::UntrustedString;
 
 use bitcoin::{Block, BlockHeader, TxMerkleNode};
 use bitcoin::hashes::Hash;
@@ -98,7 +99,6 @@ fn mpp_retry() {
 	let mut route_params = RouteParameters {
 		payment_params: route.payment_params.clone().unwrap(),
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
@@ -253,12 +253,12 @@ fn no_pending_leak_on_initial_send_failure() {
 
 	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
 		true, APIError::ChannelUnavailable { ref err },
-		assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
+		assert_eq!(err, "Peer for first hop currently disconnected"));
 
 	assert!(!nodes[0].node.has_pending_payments());
 }
@@ -297,7 +297,6 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	let route_params = RouteParameters {
 		payment_params: route.payment_params.clone().unwrap(),
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
 	check_added_monitors!(nodes[0], 1);
@@ -310,8 +309,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment
 	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
 	// which would prevent retry.
-	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
-	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
@@ -336,17 +335,23 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
-	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(as_broadcasted_txn.len(), 1);
-	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	nodes[0].node.timer_tick_occurred();
+	if !confirm_before_reload {
+		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(as_broadcasted_txn.len(), 1);
+		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	} else {
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	}
+	check_added_monitors!(nodes[0], 1);
 
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
 	// error, as the channel has hit the chain.
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -355,7 +360,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 			assert_eq!(node_id, nodes[1].node.get_our_node_id());
 			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
 			check_added_monitors!(nodes[1], 1);
 			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 		},
@@ -496,21 +501,23 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	nodes[0].node.timer_tick_occurred();
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+	check_added_monitors!(nodes[0], 1);
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
 	// error, as the channel has hit the chain.
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
 	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
@@ -520,7 +527,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 			assert_eq!(node_id, nodes[1].node.get_our_node_id());
 			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
-			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
+			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) });
 			check_added_monitors!(nodes[1], 1);
 			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		},
@@ -591,7 +598,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -615,7 +622,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// Check that after reload we can send the payment again (though we shouldn't, since it was
 	// claimed previously).
 	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -660,8 +667,8 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
-	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
@@ -812,7 +819,7 @@ fn test_fulfill_restart_failure() {
 	// Now reload nodes[1]...
 	reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 
-	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
@@ -935,7 +942,7 @@ fn successful_probe_yields_event() {
 	let mut events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events.drain(..).next().unwrap() {
-		crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+		crate::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
 			assert_eq!(payment_id, ev_pid);
 			assert_eq!(payment_hash, ev_ph);
 		},
@@ -981,7 +988,7 @@ fn failed_probe_yields_event() {
 	let mut events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events.drain(..).next().unwrap() {
-		crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
+		crate::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
 			assert_eq!(payment_id, ev_pid);
 			assert_eq!(payment_hash, ev_ph);
 		},
@@ -1194,33 +1201,31 @@ fn test_trivial_inflight_htlc_tracking(){
 	let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 
 	// Send and claim the payment. Inflight HTLCs should be empty.
-	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 500000);
-	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
-	check_added_monitors!(nodes[0], 1);
-	pass_along_route(&nodes[0], &[&vec!(&nodes[1], &nodes[2])[..]], 500000, payment_hash, payment_secret);
-	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], payment_preimage);
+	let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+	let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
 	{
-		let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut node_1_per_peer_lock;
-		let mut node_1_peer_state_lock;
 		let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
 
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
 			channel_1.get_short_channel_id().unwrap()
 		);
+		assert_eq!(chan_1_used_liquidity, None);
+	}
+	{
+		let mut node_1_per_peer_lock;
+		let mut node_1_peer_state_lock;
+		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
 			channel_2.get_short_channel_id().unwrap()
 		);
-		assert_eq!(chan_1_used_liquidity, None);
 		assert_eq!(chan_2_used_liquidity, None);
 	}
 	let pending_payments = nodes[0].node.list_recent_payments();
@@ -1233,30 +1238,32 @@ fn test_trivial_inflight_htlc_tracking(){
 	}
 
 	// Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
-	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
+	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+	let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
 	{
-		let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut node_1_per_peer_lock;
-		let mut node_1_peer_state_lock;
 		let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
 
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
 			channel_1.get_short_channel_id().unwrap()
 		);
+		// First hop accounts for expected 1000 msat fee
+		assert_eq!(chan_1_used_liquidity, Some(501000));
+	}
+	{
+		let mut node_1_per_peer_lock;
+		let mut node_1_peer_state_lock;
+		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
 			channel_2.get_short_channel_id().unwrap()
 		);
-		// First hop accounts for expected 1000 msat fee
-		assert_eq!(chan_1_used_liquidity, Some(501000));
 		assert_eq!(chan_2_used_liquidity, Some(500000));
 	}
 	let pending_payments = nodes[0].node.list_recent_payments();
@@ -1271,28 +1278,29 @@ fn test_trivial_inflight_htlc_tracking(){
 		nodes[0].node.timer_tick_occurred();
 	}
 
+	let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
 	{
-		let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
-
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut node_1_per_peer_lock;
-		let mut node_1_peer_state_lock;
 		let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
-		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
 
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
 			channel_1.get_short_channel_id().unwrap()
 		);
+		assert_eq!(chan_1_used_liquidity, None);
+	}
+	{
+		let mut node_1_per_peer_lock;
+		let mut node_1_peer_state_lock;
+		let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
+
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
 			channel_2.get_short_channel_id().unwrap()
 		);
-
-		assert_eq!(chan_1_used_liquidity, None);
 		assert_eq!(chan_2_used_liquidity, None);
 	}
@@ -1387,12 +1395,12 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 	let route = get_route(
 		&nodes[0].node.get_our_node_id(), &route_params.payment_params,
 		&nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
-		route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
+		route_params.payment_params.final_cltv_expiry_delta, nodes[0].logger, &scorer,
+		&random_seed_bytes,
 	).unwrap();
 
 	let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
@@ -1414,7 +1422,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	let (intercept_id, expected_outbound_amount_msat) = match events[0] {
-		crate::util::events::Event::HTLCIntercepted {
+		crate::events::Event::HTLCIntercepted {
 			intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
 		} => {
 			assert_eq!(pmt_hash, payment_hash);
@@ -1577,7 +1585,6 @@ fn do_automatic_retries(test: AutoRetry) {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
@@ -1787,7 +1794,6 @@ fn auto_retry_partial_failure() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	// Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
@@ -1857,27 +1863,37 @@ fn auto_retry_partial_failure() {
 		payment_params: Some(route_params.payment_params.clone()),
 	};
 	nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
+	let mut payment_params = route_params.payment_params.clone();
+	payment_params.previously_failed_channels.push(chan_2_id);
 	nodes[0].router.expect_find_route(RouteParameters {
-		payment_params: route_params.payment_params.clone(),
-		final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV
+		payment_params, final_value_msat: amt_msat / 2,
 	}, Ok(retry_1_route));
+	let mut payment_params = route_params.payment_params.clone();
+	payment_params.previously_failed_channels.push(chan_3_id);
 	nodes[0].router.expect_find_route(RouteParameters {
-		payment_params: route_params.payment_params.clone(),
-		final_value_msat: amt_msat / 4, final_cltv_expiry_delta: TEST_FINAL_CLTV
+		payment_params, final_value_msat: amt_msat / 4,
 	}, Ok(retry_2_route));
 
 	// Send a payment that will partially fail on send, then partially fail on retry, then succeed.
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
 	let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(closed_chan_events.len(), 2);
+	assert_eq!(closed_chan_events.len(), 4);
 	match closed_chan_events[0] {
 		Event::ChannelClosed { .. } => {},
 		_ => panic!("Unexpected event"),
 	}
 	match closed_chan_events[1] {
+		Event::PaymentPathFailed { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	match closed_chan_events[2] {
 		Event::ChannelClosed { .. } => {},
 		_ => panic!("Unexpected event"),
 	}
+	match closed_chan_events[3] {
+		Event::PaymentPathFailed { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
 
 	// Pass the first part of the payment along the path.
 	check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
@@ -1989,15 +2005,16 @@ fn auto_retry_zero_attempts_send_error() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
-	let err = nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap_err();
-	if let PaymentSendFailure::AllFailedResendSafe(_) = err {
-	} else { panic!("Unexpected error"); }
+	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
-	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 1); // channel close event
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 3);
+	if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
+	if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
+	if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
 	check_added_monitors!(nodes[0], 2);
 }
@@ -2027,7 +2044,6 @@ fn fails_paying_after_rejected_by_payee() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2074,7 +2090,6 @@ fn retry_multi_path_single_failed_payment() {
 	let route_params = RouteParameters {
 		payment_params: payment_params.clone(),
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let chans = nodes[0].node.list_usable_channels();
@@ -2103,11 +2118,13 @@ fn retry_multi_path_single_failed_payment() {
 	// On retry, split the payment across both channels.
 	route.paths[0][0].fee_msat = 50_000_001;
 	route.paths[1][0].fee_msat = 50_000_000;
+	let mut pay_params = route.payment_params.clone().unwrap();
+	pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
 	nodes[0].router.expect_find_route(RouteParameters {
-		payment_params: route.payment_params.clone().unwrap(),
+		payment_params: pay_params,
 		// Note that the second request here requests the amount we originally failed to send,
 		// not the amount remaining on the full payment, which should be changed.
-		final_value_msat: 100_000_001, final_cltv_expiry_delta: TEST_FINAL_CLTV
+		final_value_msat: 100_000_001,
 	}, Ok(route.clone()));
 
 	{
@@ -2121,6 +2138,19 @@ fn retry_multi_path_single_failed_payment() {
 	}
 
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+			failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+			short_channel_id: Some(expected_scid), .. } =>
+		{
+			assert_eq!(payment_hash, ev_payment_hash);
+			assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+			assert!(err_msg.contains("max HTLC"));
+		},
+		_ => panic!("Unexpected event"),
+	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
 	check_added_monitors!(nodes[0], 2);
@@ -2153,7 +2183,6 @@ fn immediate_retry_on_failure() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let chans = nodes[0].node.list_usable_channels();
@@ -2176,12 +2205,26 @@ fn immediate_retry_on_failure() {
 	route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap();
 	route.paths[0][0].fee_msat = 50_000_000;
 	route.paths[1][0].fee_msat = 50_000_001;
+	let mut pay_params = route_params.payment_params.clone();
+	pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
 	nodes[0].router.expect_find_route(RouteParameters {
-		payment_params: route_params.payment_params.clone(),
-		final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV
+		payment_params: pay_params, final_value_msat: amt_msat,
 	}, Ok(route.clone()));
 
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
+			failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
+			short_channel_id: Some(expected_scid), .. } =>
+		{
+			assert_eq!(payment_hash, ev_payment_hash);
+			assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
+			assert!(err_msg.contains("max HTLC"));
+		},
+		_ => panic!("Unexpected event"),
+	}
 	let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(htlc_msgs.len(), 2);
 	check_added_monitors!(nodes[0], 2);
@@ -2190,7 +2233,8 @@
 #[test]
 fn no_extra_retries_on_back_to_back_fail() {
 	// In a previous release, we had a race where we may exceed the payment retry count if we
-	// get two failures in a row with the second having `all_paths_failed` set.
+	// get two failures in a row with the second indicating that all paths had failed (this field,
+	// `all_paths_failed`, has since been removed).
 	// Generally, when we give up trying to retry a payment, we don't know for sure what the
 	// current state of the ChannelManager event queue is. Specifically, we cannot be sure that
 	// there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events
@@ -2227,7 +2271,6 @@ fn no_extra_retries_on_back_to_back_fail() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let mut route = Route {
@@ -2273,7 +2316,7 @@ fn no_extra_retries_on_back_to_back_fail() {
 	route.paths[0][1].fee_msat = amt_msat;
 	nodes[0].router.expect_find_route(RouteParameters {
 		payment_params: second_payment_params,
-		final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV,
+		final_value_msat: amt_msat,
 	}, Ok(route.clone()));
 
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2428,7 +2471,6 @@ fn test_simple_partial_retry() {
 	let route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let mut route = Route {
@@ -2473,7 +2515,7 @@ fn test_simple_partial_retry() {
 	route.paths.remove(0);
 	nodes[0].router.expect_find_route(RouteParameters {
 		payment_params: second_payment_params,
-		final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV,
+		final_value_msat: amt_msat / 2,
 	}, Ok(route.clone()));
 
 	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
@@ -2594,7 +2636,6 @@ fn test_threaded_payment_retries() {
 	let mut route_params = RouteParameters {
 		payment_params,
 		final_value_msat: amt_msat,
-		final_cltv_expiry_delta: TEST_FINAL_CLTV,
 	};
 
 	let mut route = Route {
@@ -2716,3 +2757,84 @@ fn test_threaded_payment_retries() {
 		}
 	}
 }
+
+fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
+	// Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
+	// receive the PaymentSent event even if the ChannelManager had no idea about the payment when
+	// it was last persisted.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let (persister_a, persister_b, persister_c);
+	let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
+	let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+	let mut nodes_0_serialized = Vec::new();
+	if !persist_manager_with_payment {
+		nodes_0_serialized = nodes[0].node.encode();
+	}
+
+	let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	if persist_manager_with_payment {
+		nodes_0_serialized = nodes[0].node.encode();
+	}
+
+	nodes[1].node.claim_funds(our_payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
+
+	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+	check_added_monitors!(nodes[0], 1);
+
+	// The ChannelMonitor should always be the latest version, as we're required to persist it
+	// during the commitment signed handling.
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+	reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 2);
+	if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
+	if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
+	// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
+	// the double-claim that would otherwise appear at the end of this test.
+	nodes[0].node.timer_tick_occurred();
+	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_broadcasted_txn.len(), 1);
+
+	// Ensure that, even after some time, if we restart we still include *something* in the current
+	// `ChannelManager` which prevents a `PaymentFailed` when we restart even if pending resolved
+	// payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
+	// A naive implementation of the fix here would wipe the pending payments set, causing a
+	// failure event when we restart.
+	for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+	reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert!(events.is_empty());
+
+	// Ensure that we don't generate any further events even after the channel-closing commitment
+	// transaction is confirmed on-chain.
+	confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
+	for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
+
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert!(events.is_empty());
+
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
+	reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert!(events.is_empty());
+}
+
+#[test]
+fn no_missing_sent_on_midpoint_reload() {
+	do_no_missing_sent_on_midpoint_reload(false);
+	do_no_missing_sent_on_midpoint_reload(true);
+}