1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 //! Tests the payment retry logic in ChannelManager, including various edge-cases around
11 //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
12 //! payments thereafter.
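//
// One of the retry entry points exercised below, in sketch form (based on the calls used in
// these tests; `node`, `payment_params`, `amt_msat`, `payment_hash` and `payment_secret` are
// placeholders rather than helpers defined in this file):
//
//     let route_params = RouteParameters {
//         payment_params: payment_params.clone(),
//         final_value_msat: amt_msat,
//         final_cltv_expiry_delta: TEST_FINAL_CLTV,
//     };
//     node.send_payment_with_retry(payment_hash, &Some(payment_secret),
//         PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();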
14 use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
15 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
16 use crate::chain::keysinterface::EntropySource;
17 use crate::chain::transaction::OutPoint;
18 use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
19 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
20 use crate::ln::features::InvoiceFeatures;
22 use crate::ln::msgs::ChannelMessageHandler;
23 use crate::ln::outbound_payment::Retry;
24 use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
25 use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
26 use crate::routing::scoring::ChannelUsage;
27 use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
28 use crate::util::test_utils;
29 use crate::util::errors::APIError;
30 use crate::util::ser::Writeable;
32 use bitcoin::{Block, BlockHeader, TxMerkleNode};
33 use bitcoin::hashes::Hash;
34 use bitcoin::network::constants::Network;
36 use crate::prelude::*;
38 use crate::ln::functional_test_utils::*;
39 use crate::routing::gossip::NodeId;
40 #[cfg(feature = "std")]
42 crate::util::time::tests::SinceEpoch,
43 std::time::{SystemTime, Instant, Duration}
48 let chanmon_cfgs = create_chanmon_cfgs(4);
49 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
50 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
51 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
53 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
54 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
55 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
56 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
58 let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
59 let path = route.paths[0].clone();
60 route.paths.push(path);
61 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
62 route.paths[0][0].short_channel_id = chan_1_id;
63 route.paths[0][1].short_channel_id = chan_3_id;
64 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
65 route.paths[1][0].short_channel_id = chan_2_id;
66 route.paths[1][1].short_channel_id = chan_4_id;
67 send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
68 fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
73 let chanmon_cfgs = create_chanmon_cfgs(4);
74 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
75 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
76 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
78 let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
79 let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
80 let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
81 let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2);
83 send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
85 let amt_msat = 1_000_000;
86 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
87 let path = route.paths[0].clone();
88 route.paths.push(path);
89 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
90 route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
91 route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
92 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
93 route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
94 route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
96 // Initiate the MPP payment.
97 let payment_id = PaymentId(payment_hash.0);
98 let mut route_params = RouteParameters {
99 payment_params: route.payment_params.clone().unwrap(),
100 final_value_msat: amt_msat,
101 final_cltv_expiry_delta: TEST_FINAL_CLTV,
104 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
105 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
106 check_added_monitors!(nodes[0], 2); // one monitor per path
107 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
108 assert_eq!(events.len(), 2);
110 // Pass half of the payment along the success path.
111 let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
112 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);
114 // Add the HTLC along the first hop.
115 let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
116 let (update_add, commitment_signed) = match fail_path_msgs_1 {
117 MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
118 assert_eq!(update_add_htlcs.len(), 1);
119 assert!(update_fail_htlcs.is_empty());
120 assert!(update_fulfill_htlcs.is_empty());
121 assert!(update_fail_malformed_htlcs.is_empty());
122 assert!(update_fee.is_none());
123 (update_add_htlcs[0].clone(), commitment_signed.clone())
125 _ => panic!("Unexpected event"),
127 nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
128 commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);
130 // Attempt to forward the payment and complete the 2nd path's failure.
131 expect_pending_htlcs_forwardable!(&nodes[2]);
132 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
133 let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
134 assert!(htlc_updates.update_add_htlcs.is_empty());
135 assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
136 assert!(htlc_updates.update_fulfill_htlcs.is_empty());
137 assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
138 check_added_monitors!(nodes[2], 1);
139 nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
140 commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
141 let mut events = nodes[0].node.get_and_clear_pending_events();
143 Event::PendingHTLCsForwardable { .. } => {},
144 _ => panic!("Unexpected event")
147 expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());
149 // Rebalance the channel so the second half of the payment can succeed.
150 send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);
152 // Retry the second half of the payment and make sure it succeeds.
153 route.paths.remove(0);
154 route_params.final_value_msat = 1_000_000;
155 route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
156 nodes[0].router.expect_find_route(route_params, Ok(route));
157 nodes[0].node.process_pending_htlc_forwards();
158 check_added_monitors!(nodes[0], 1);
159 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
160 assert_eq!(events.len(), 1);
161 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
162 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
165 fn do_mpp_receive_timeout(send_partial_mpp: bool) {
166 let chanmon_cfgs = create_chanmon_cfgs(4);
167 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
168 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
169 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
171 let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
172 let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
173 let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
174 let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3);
176 let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
177 let path = route.paths[0].clone();
178 route.paths.push(path);
179 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
180 route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
181 route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
182 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
183 route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
184 route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;
186 // Initiate the MPP payment.
187 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
188 check_added_monitors!(nodes[0], 2); // one monitor per path
189 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
190 assert_eq!(events.len(), 2);
192 // Pass half of the payment along the first path.
193 let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
194 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
196 if send_partial_mpp {
197 // Time out the partial MPP
198 for _ in 0..MPP_TIMEOUT_TICKS {
199 nodes[3].node.timer_tick_occurred();
202 // Failed HTLC from node 3 -> 1
203 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
204 let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
205 assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
206 nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
207 check_added_monitors!(nodes[3], 1);
208 commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);
210 // Failed HTLC from node 1 -> 0
211 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
212 let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
213 assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
214 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
215 check_added_monitors!(nodes[1], 1);
216 commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);
218 expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
220 // Pass half of the payment along the second path.
221 let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
222 pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);
224 // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
225 for _ in 0..MPP_TIMEOUT_TICKS {
226 nodes[3].node.timer_tick_occurred();
229 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
234 fn mpp_receive_timeout() {
235 do_mpp_receive_timeout(true);
236 do_mpp_receive_timeout(false);
240 fn no_pending_leak_on_initial_send_failure() {
241 // In an earlier version of our payment tracking, we'd have a retry entry even when the initial
242 // HTLC for the payment failed to send due to local channel errors (e.g. peer disconnected). In this
243 // case, the user wouldn't have a PaymentId to retry the payment with, but we'd think we have a
244 // pending payment forever and never time it out.
245 // Here we test exactly that - sending a payment while the peer is disconnected so the initial
246 // send fails, and then checking that no pending payment is being tracked.
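// In sketch form, the invariant this test checks (same names as used below):
//
//     let res = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret),
//         PaymentId(payment_hash.0));
//     assert!(res.is_err());                            // the initial send failed entirely...
//     assert!(!nodes[0].node.has_pending_payments());   // ...and nothing was left pending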
247 let chanmon_cfgs = create_chanmon_cfgs(2);
248 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
249 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
250 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
252 create_announced_chan_between_nodes(&nodes, 0, 1);
254 let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
256 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
257 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
259 unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
260 true, APIError::ChannelUnavailable { ref err },
261 assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));
263 assert!(!nodes[0].node.has_pending_payments());
266 fn do_retry_with_no_persist(confirm_before_reload: bool) {
267 // If we send a pending payment and `send_payment` returns success, we should always either
268 // return a payment failure event or a payment success event, and on failure the payment should
271 // In order to do so when the ChannelManager isn't immediately persisted (which is normal - it's
272 // always persisted asynchronously), the ChannelManager has to reload some payment data from
273 // ChannelMonitor(s) in some cases. This tests that reloading.
275 // `confirm_before_reload` confirms the channel-closing commitment transaction on-chain prior
276 // to reloading the ChannelManager, increasing test coverage in ChannelMonitor HTLC tracking
277 // which has separate codepaths for "commitment transaction already confirmed" and not.
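// In sketch form, the serialize-then-reload pattern this test uses (helper macros from
// functional_test_utils; the ChannelManager snapshot is deliberately older than the
// ChannelMonitor snapshot):
//
//     let manager_bytes = nodes[0].node.encode();                   // stale snapshot
//     /* payments sent; monitors persisted during commitment_signed_dance!() */
//     let monitor_bytes = get_monitor!(nodes[0], chan_id).encode(); // fresh snapshot
//     reload_node!(nodes[0], test_default_channel_config(), &manager_bytes,
//         &[&monitor_bytes], persister, new_chain_monitor, nodes_0_deserialized);
//     // The reloaded manager notices it is stale, force-closes the channel
//     // (ClosureReason::OutdatedChannelManager), and resurrects the pending payment from
//     // the monitor so it can still be retried or resolved.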
278 let chanmon_cfgs = create_chanmon_cfgs(3);
279 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
280 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
281 let persister: test_utils::TestPersister;
282 let new_chain_monitor: test_utils::TestChainMonitor;
283 let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
284 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
286 let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
287 let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
289 // Serialize the ChannelManager prior to sending payments
290 let nodes_0_serialized = nodes[0].node.encode();
292 // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
294 let amt_msat = 1_000_000;
295 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
296 let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
297 let route_params = RouteParameters {
298 payment_params: route.payment_params.clone().unwrap(),
299 final_value_msat: amt_msat,
300 final_cltv_expiry_delta: TEST_FINAL_CLTV,
302 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
303 check_added_monitors!(nodes[0], 1);
305 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
306 assert_eq!(events.len(), 1);
307 let payment_event = SendEvent::from_event(events.pop().unwrap());
308 assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
310 // We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
311 // to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
312 // which would prevent retry.
313 nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
314 nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
316 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
317 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
318 // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
319 let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
321 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
323 let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
324 if confirm_before_reload {
325 mine_transaction(&nodes[0], &as_commitment_tx);
326 nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
329 // The ChannelMonitor should always be the latest version, as we're required to persist it
330 // during the `commitment_signed_dance!()`.
331 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
332 reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
334 // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
335 // force-close the channel.
336 check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
337 assert!(nodes[0].node.list_channels().is_empty());
338 assert!(nodes[0].node.has_pending_payments());
339 let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
340 assert_eq!(as_broadcasted_txn.len(), 1);
341 assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
343 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
344 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
345 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
347 // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
348 // error, as the channel has hit the chain.
349 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
350 let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
351 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
352 let as_err = nodes[0].node.get_and_clear_pending_msg_events();
353 assert_eq!(as_err.len(), 1);
355 MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
356 assert_eq!(node_id, nodes[1].node.get_our_node_id());
357 nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
358 check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
359 check_added_monitors!(nodes[1], 1);
360 assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
362 _ => panic!("Unexpected event"),
364 check_closed_broadcast!(nodes[1], false);
366 // Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
367 // we close in a moment.
368 nodes[2].node.claim_funds(payment_preimage_1);
369 check_added_monitors!(nodes[2], 1);
370 expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
372 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
373 nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
374 check_added_monitors!(nodes[1], 1);
375 commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
376 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
378 if confirm_before_reload {
379 let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
380 nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
383 // Create a new channel on which to retry the payment before we fail the payment via the
384 // HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
385 // connecting several blocks while creating the channel (implying time has passed).
386 create_announced_chan_between_nodes(&nodes, 0, 1);
387 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
389 mine_transaction(&nodes[1], &as_commitment_tx);
390 let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
391 assert_eq!(bs_htlc_claim_txn.len(), 1);
392 check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);
394 if !confirm_before_reload {
395 mine_transaction(&nodes[0], &as_commitment_tx);
397 mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
398 expect_payment_sent!(nodes[0], payment_preimage_1);
399 connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
400 let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
401 assert_eq!(as_htlc_timeout_txn.len(), 2);
402 let (first_htlc_timeout_tx, second_htlc_timeout_tx) = (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1]);
403 check_spends!(first_htlc_timeout_tx, as_commitment_tx);
404 check_spends!(second_htlc_timeout_tx, as_commitment_tx);
405 if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
406 confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
408 confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
410 nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
411 expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
413 // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
414 // reloaded) via a route over the new channel, which will work without issue and eventually be
415 // received and claimed at the recipient just like any other payment.
416 let (mut new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
418 // Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
419 // and not the original fee. We also update nodes[1]'s relevant config as
420 // do_claim_payment_along_route expects us to never overpay.
422 let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
423 let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
424 .unwrap().lock().unwrap();
425 let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
426 let mut new_config = channel.config();
427 new_config.forwarding_fee_base_msat += 100_000;
428 channel.update_config(&new_config);
429 new_route.paths[0][0].fee_msat += 100_000;
432 // Force expiration of the channel's previous config.
433 for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
434 nodes[1].node.timer_tick_occurred();
437 assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
438 nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
439 check_added_monitors!(nodes[0], 1);
440 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
441 assert_eq!(events.len(), 1);
442 pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
443 do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
444 expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0][0].fee_msat));
448 fn retry_with_no_persist() {
449 do_retry_with_no_persist(true);
450 do_retry_with_no_persist(false);
453 fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
454 // Test that an off-chain completed payment is not retryable on restart. This was previously
455 // broken for dust payments, but we test for both dust and non-dust payments.
457 // `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
459 let chanmon_cfgs = create_chanmon_cfgs(3);
460 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
462 let mut manually_accept_config = test_default_channel_config();
463 manually_accept_config.manually_accept_inbound_channels = true;
465 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]);
467 let first_persister: test_utils::TestPersister;
468 let first_new_chain_monitor: test_utils::TestChainMonitor;
469 let first_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
470 let second_persister: test_utils::TestPersister;
471 let second_new_chain_monitor: test_utils::TestChainMonitor;
472 let second_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
473 let third_persister: test_utils::TestPersister;
474 let third_new_chain_monitor: test_utils::TestChainMonitor;
475 let third_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
477 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
479 // Because we set nodes[1] to manually accept channels, just open a 0-conf channel.
480 let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
481 confirm_transaction(&nodes[0], &funding_tx);
482 confirm_transaction(&nodes[1], &funding_tx);
483 // Ignore the announcement_signatures messages
484 nodes[0].node.get_and_clear_pending_msg_events();
485 nodes[1].node.get_and_clear_pending_msg_events();
486 let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
488 // Serialize the ChannelManager prior to sending payments
489 let mut nodes_0_serialized = nodes[0].node.encode();
491 let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0;
492 let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 });
494 // The ChannelMonitor should always be the latest version, as we're required to persist it
495 // during the `commitment_signed_dance!()`.
496 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
498 reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
499 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
501 // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
502 // force-close the channel.
503 check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
504 assert!(nodes[0].node.list_channels().is_empty());
505 assert!(nodes[0].node.has_pending_payments());
506 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
508 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }).unwrap();
509 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
511 // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
512 // error, as the channel has hit the chain.
513 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }).unwrap();
514 let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
515 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
516 let as_err = nodes[0].node.get_and_clear_pending_msg_events();
517 assert_eq!(as_err.len(), 1);
518 let bs_commitment_tx;
520 MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
521 assert_eq!(node_id, nodes[1].node.get_our_node_id());
522 nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
523 check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
524 check_added_monitors!(nodes[1], 1);
525 bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
527 _ => panic!("Unexpected event"),
529 check_closed_broadcast!(nodes[1], false);
531 // Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
532 // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
533 // incoming HTLCs with the same payment hash later.
534 nodes[2].node.fail_htlc_backwards(&payment_hash);
535 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
536 check_added_monitors!(nodes[2], 1);
538 let htlc_fail_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
539 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
540 commitment_signed_dance!(nodes[1], nodes[2], htlc_fail_updates.commitment_signed, false);
541 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
542 [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
544 // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
545 // the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
546 // after the commitment transaction, so always connect the commitment transaction.
547 mine_transaction(&nodes[0], &bs_commitment_tx[0]);
548 mine_transaction(&nodes[1], &bs_commitment_tx[0]);
550 connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
551 connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
552 let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
553 check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
554 assert_eq!(as_htlc_timeout.len(), 1);
556 mine_transaction(&nodes[0], &as_htlc_timeout[0]);
557 // nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
558 nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
559 mine_transaction(&nodes[1], &as_htlc_timeout[0]);
562 // Create a new channel on which to retry the payment before we fail the payment via the
563 // HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
564 // connecting several blocks while creating the channel (implying time has passed).
565 // We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
566 let (_, chan_id_3) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
567 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
569 // If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
570 // confirming, we will fail as it's considered still-pending...
571 let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
572 match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
573 Err(PaymentSendFailure::DuplicatePayment) => {},
574 _ => panic!("Unexpected error")
576 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
578 // After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
579 // again. We serialize the node first as we'll then test retrying the HTLC after a restart
580 // (which should also still work).
581 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
582 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
583 expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());
585 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
586 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
587 nodes_0_serialized = nodes[0].node.encode();
589 // After the payment failed, we're free to send it again.
590 assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
591 assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());
593 reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
594 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
596 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
598 // Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
599 // the payment is not (spuriously) listed as still pending.
600 assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
601 check_added_monitors!(nodes[0], 1);
602 pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
603 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
605 match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
606 Err(PaymentSendFailure::DuplicatePayment) => {},
607 _ => panic!("Unexpected error")
609 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
611 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
612 let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
613 nodes_0_serialized = nodes[0].node.encode();
615 // Check that after reload the payment is still considered complete - attempting to send it
616 // again is rejected as a duplicate, since it was claimed previously.
617 reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
618 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
620 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
622 match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
623 Err(PaymentSendFailure::DuplicatePayment) => {},
624 _ => panic!("Unexpected error")
626 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
630 fn test_completed_payment_not_retryable_on_reload() {
631 do_test_completed_payment_not_retryable_on_reload(true);
632 do_test_completed_payment_not_retryable_on_reload(false);
636 fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
637 // When a Channel is closed, any outbound HTLCs which were relayed through it are simply
638 // dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
639 // having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
640 // the ChannelMonitor tells it to.
642 // If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
643 // ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
644 // duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
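// In sketch form, the gating behavior exercised below: while a monitor update is still
// persisting (ChannelMonitorUpdateStatus::InProgress), the ChainMonitor withholds the
// resulting HTLC claim/fail event from the ChannelManager until completion is reported.
// Names are as used later in this test; `update_id` stands for the pending update drained
// from the persister:
//
//     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
//     /* connect the block containing the claim/timeout transaction */
//     assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
//     nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update_id).unwrap();
//     // Only now does the PaymentSent / PaymentPathFailed event reach the user.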
645 let chanmon_cfgs = create_chanmon_cfgs(2);
646 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
647 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
648 let persister: test_utils::TestPersister;
649 let new_chain_monitor: test_utils::TestChainMonitor;
650 let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
651 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
653 let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
655 // Route a payment, but force-close the channel before the HTLC fulfill message arrives at
657 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
658 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
659 check_closed_broadcast!(nodes[0], true);
660 check_added_monitors!(nodes[0], 1);
661 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
663 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
664 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
666 // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
667 connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
668 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
669 assert_eq!(node_txn.len(), 3);
670 assert_eq!(node_txn[0], node_txn[1]);
671 check_spends!(node_txn[1], funding_tx);
672 check_spends!(node_txn[2], node_txn[1]);
673 let timeout_txn = vec![node_txn[2].clone()];
675 nodes[1].node.claim_funds(payment_preimage);
676 check_added_monitors!(nodes[1], 1);
677 expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
679 let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
680 connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
681 check_closed_broadcast!(nodes[1], true);
682 check_added_monitors!(nodes[1], 1);
683 check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
684 let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
685 assert_eq!(claim_txn.len(), 1);
686 check_spends!(claim_txn[0], node_txn[1]);
688 header.prev_blockhash = nodes[0].best_block_hash();
689 connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
691 if confirm_commitment_tx {
692 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
695 header.prev_blockhash = nodes[0].best_block_hash();
696 let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };
699 assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
700 connect_block(&nodes[0], &claim_block);
701 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
704 // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
705 // returning InProgress. This should cause the claim event to never make its way to the
707 chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
708 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
711 connect_blocks(&nodes[0], 1);
713 connect_block(&nodes[0], &claim_block);
716 let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
717 let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
718 .get_mut(&funding_txo).unwrap().drain().collect();
719 // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
720 // If we're testing connection idempotency we may get substantially more.
721 assert!(mon_updates.len() >= 1);
722 assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
723 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
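// For reference, the two block-connection interfaces the comments just above refer to (both
// are imported at the top of this file); a sketch only, with `monitor` standing in for any
// chain::Listen / chain::Confirm implementor such as the ChainMonitor:
//
//     // chain::Listen is fed whole (or filtered) blocks, in order:
//     monitor.block_connected(&block, height);
//     // chain::Confirm is fed individual confirmed transactions plus best-block updates, so
//     // the same transaction (and thus the same monitor persistence) may be reported twice:
//     monitor.transactions_confirmed(&block.header, &txdata, height);
//     monitor.best_block_updated(&block.header, height);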
725 // If we persist the ChannelManager here, we should get the PaymentSent event after
727 let mut chan_manager_serialized = Vec::new();
728 if !persist_manager_post_event {
729 chan_manager_serialized = nodes[0].node.encode();
732 // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
733 // payment sent event.
734 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
735 let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
736 for update in mon_updates {
737 nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
740 expect_payment_failed!(nodes[0], payment_hash, false);
742 expect_payment_sent!(nodes[0], payment_preimage);
745 // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
747 if persist_manager_post_event {
748 chan_manager_serialized = nodes[0].node.encode();
751 // Now reload nodes[0]...
752 reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
754 if persist_manager_post_event {
755 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
756 } else if payment_timeout {
757 expect_payment_failed!(nodes[0], payment_hash, false);
759 expect_payment_sent!(nodes[0], payment_preimage);
762 // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
763 // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
764 // payment events should kick in, leaving us with no pending events here.
765 let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
766 nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
767 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
771 fn test_dup_htlc_onchain_fails_on_reload() {
772 do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
773 do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
774 do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
775 do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
776 do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
777 do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
781 fn test_fulfill_restart_failure() {
782 // When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully
783 // fulfilled. At this point, the peer can reconnect and decide to either fulfill the HTLC
784 // again, or fail it, giving us free money.
786 // Of course probably they won't fail it and give us free money, but because we have code to
787 // handle it, we should test the logic for it anyway. We do that here.
788 let chanmon_cfgs = create_chanmon_cfgs(2);
789 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
790 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
791 let persister: test_utils::TestPersister;
792 let new_chain_monitor: test_utils::TestChainMonitor;
793 let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
794 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
796 let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
797 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
799 // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
800 // pre-fulfill, which we do by serializing it here.
801 let chan_manager_serialized = nodes[1].node.encode();
802 let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode();
804 nodes[1].node.claim_funds(payment_preimage);
805 check_added_monitors!(nodes[1], 1);
806 expect_payment_claimed!(nodes[1], payment_hash, 100_000);
808 let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
809 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
810 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
812 // Now reload nodes[1]...
813 reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
815 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
816 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
818 nodes[1].node.fail_htlc_backwards(&payment_hash);
819 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
820 check_added_monitors!(nodes[1], 1);
821 let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
822 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
823 commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false);
824 // nodes[0] shouldn't generate any events here: while it just got a payment failure completion,
825 // it had already considered the payment fulfilled, and now it has simply received free money.
826 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
830 fn get_ldk_payment_preimage() {
831 // Ensure that `ChannelManager::get_payment_preimage` can successfully be used to claim a payment.
832 let chanmon_cfgs = create_chanmon_cfgs(2);
833 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
834 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
835 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
836 create_announced_chan_between_nodes(&nodes, 0, 1);
838 let amt_msat = 60_000;
839 let expiry_secs = 60 * 60;
840 let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap();
842 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
843 .with_features(nodes[1].node.invoice_features());
844 let scorer = test_utils::TestScorer::new();
845 let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
846 let random_seed_bytes = keys_manager.get_secure_random_bytes();
847 let route = get_route(
848 &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
849 Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
850 amt_msat, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
851 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
852 check_added_monitors!(nodes[0], 1);
854 // Make sure to use `get_payment_preimage`
855 let payment_preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
856 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
857 assert_eq!(events.len(), 1);
858 pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
859 claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
863 fn sent_probe_is_probe_of_sending_node() {
864 let chanmon_cfgs = create_chanmon_cfgs(3);
865 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
866 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
867 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
869 create_announced_chan_between_nodes(&nodes, 0, 1);
870 create_announced_chan_between_nodes(&nodes, 1, 2);
872 // First check we refuse to build a single-hop probe
873 let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
874 assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err());
876 // Then build an actual two-hop probing path
877 let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
879 match nodes[0].node.send_probe(route.paths[0].clone()) {
880 Ok((payment_hash, payment_id)) => {
881 assert!(nodes[0].node.payment_is_probe(&payment_hash, &payment_id));
882 assert!(!nodes[1].node.payment_is_probe(&payment_hash, &payment_id));
883 assert!(!nodes[2].node.payment_is_probe(&payment_hash, &payment_id));
888 get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
889 check_added_monitors!(nodes[0], 1);
893 fn successful_probe_yields_event() {
894 let chanmon_cfgs = create_chanmon_cfgs(3);
895 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
896 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
897 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
899 create_announced_chan_between_nodes(&nodes, 0, 1);
900 create_announced_chan_between_nodes(&nodes, 1, 2);
902 let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
904 let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
906 // node[0] -- update_add_htlcs -> node[1]
907 check_added_monitors!(nodes[0], 1);
908 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
909 let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
910 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
911 check_added_monitors!(nodes[1], 0);
912 commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
913 expect_pending_htlcs_forwardable!(nodes[1]);
915 // node[1] -- update_add_htlcs -> node[2]
916 check_added_monitors!(nodes[1], 1);
917 let updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
918 let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
919 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &probe_event.msgs[0]);
920 check_added_monitors!(nodes[2], 0);
921 commitment_signed_dance!(nodes[2], nodes[1], probe_event.commitment_msg, true, true);
923 // node[1] <- update_fail_htlcs -- node[2]
924 let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
925 nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
926 check_added_monitors!(nodes[1], 0);
927 commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, true);
929 // node[0] <- update_fail_htlcs -- node[1]
930 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
931 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
932 check_added_monitors!(nodes[0], 0);
933 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
935 let mut events = nodes[0].node.get_and_clear_pending_events();
936 assert_eq!(events.len(), 1);
937 match events.drain(..).next().unwrap() {
938 crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
939 assert_eq!(payment_id, ev_pid);
940 assert_eq!(payment_hash, ev_ph);
944 assert!(!nodes[0].node.has_pending_payments());
948 fn failed_probe_yields_event() {
949 let chanmon_cfgs = create_chanmon_cfgs(3);
950 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
951 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
952 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
954 create_announced_chan_between_nodes(&nodes, 0, 1);
955 create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000);
957 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
959 let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
961 let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
963 // node[0] -- update_add_htlcs -> node[1]
964 check_added_monitors!(nodes[0], 1);
965 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
966 let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
967 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
968 check_added_monitors!(nodes[1], 0);
969 commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
970 expect_pending_htlcs_forwardable!(nodes[1]);
972 // node[0] <- update_fail_htlcs -- node[1]
973 check_added_monitors!(nodes[1], 1);
974 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
975 // Skip the PendingHTLCsForwardable event
976 let _events = nodes[1].node.get_and_clear_pending_events();
977 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
978 check_added_monitors!(nodes[0], 0);
979 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
981 let mut events = nodes[0].node.get_and_clear_pending_events();
982 assert_eq!(events.len(), 1);
983 match events.drain(..).next().unwrap() {
984 crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
985 assert_eq!(payment_id, ev_pid);
986 assert_eq!(payment_hash, ev_ph);
990 assert!(!nodes[0].node.has_pending_payments());
994 fn onchain_failed_probe_yields_event() {
995 // Tests that an attempt to probe over a channel that is eventually closed results in a failure
997 let chanmon_cfgs = create_chanmon_cfgs(3);
998 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
999 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1000 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1002 let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1003 create_announced_chan_between_nodes(&nodes, 1, 2);
1005 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
1007 // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
1008 let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000, 42);
1009 let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
1011 // node[0] -- update_add_htlcs -> node[1]
1012 check_added_monitors!(nodes[0], 1);
1013 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1014 let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
1015 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
1016 check_added_monitors!(nodes[1], 0);
1017 commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
1018 expect_pending_htlcs_forwardable!(nodes[1]);
1020 check_added_monitors!(nodes[1], 1);
1021 let _ = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1023 // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
1024 // Node A, which after 6 confirmations should result in a probe failure event.
1025 let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
1026 confirm_transaction(&nodes[0], &bs_txn[0]);
1027 check_closed_broadcast!(&nodes[0], true);
1028 check_added_monitors!(nodes[0], 1);
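// Confirming the counterparty's commitment transaction should leave us with both a ChannelClosed
// event and a ProbeFailed event for the in-flight probe.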
1030 let mut events = nodes[0].node.get_and_clear_pending_events();
1031 assert_eq!(events.len(), 2);
1032 let mut found_probe_failed = false;
1033 for event in events.drain(..) {
1035 Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
1036 assert_eq!(payment_id, ev_pid);
1037 assert_eq!(payment_hash, ev_ph);
1038 found_probe_failed = true;
1040 Event::ChannelClosed { .. } => {},
1044 assert!(found_probe_failed);
1045 assert!(!nodes[0].node.has_pending_payments());
1049 fn claimed_send_payment_idempotent() {
1050 // Tests that `send_payment` (and friends) are (reasonably) idempotent.
1051 let chanmon_cfgs = create_chanmon_cfgs(2);
1052 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1053 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1054 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1056 create_announced_chan_between_nodes(&nodes, 0, 1).2;
1058 let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1059 let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1061 macro_rules! check_send_rejected {
1063 // If we try to resend a new payment with a different payment_hash but with the same
1064 // payment_id, it should be rejected.
1065 let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1067 Err(PaymentSendFailure::DuplicatePayment) => {},
1068 _ => panic!("Unexpected send result: {:?}", send_result),
1071 // Further, if we try to send a spontaneous payment with the same payment_id it should
1072 // also be rejected.
1073 let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1075 Err(PaymentSendFailure::DuplicatePayment) => {},
1076 _ => panic!("Unexpected send result: {:?}", send_result),
1081 check_send_rejected!();
1083 // Claim the payment backwards, but note that the PaymentSent event is still pending and has
1084 // not been seen by the user. At this point, from the user perspective nothing has changed, so
1085 // we must remain just as idempotent as we were before.
1086 do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, first_payment_preimage);
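// Even after a full idempotency timeout worth of timer ticks, the PaymentId stays reserved because
// the PaymentSent event has not yet been handled by the user.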
1088 for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1089 nodes[0].node.timer_tick_occurred();
1092 check_send_rejected!();
1094 // Once the user sees and handles the `PaymentSent` event, we expect them to no longer call
1095 // `send_payment`, and our idempotency guarantees are off - they should have atomically marked
1096 // the payment complete. However, they could have called `send_payment` while the event was
1097 // being processed, leading to a race in our idempotency guarantees. Thus, even immediately
1098 // after the event is handled a duplicate payment should still be rejected.
1099 expect_payment_sent!(&nodes[0], first_payment_preimage, Some(0));
1100 check_send_rejected!();
1102 // If relatively little time has passed, a duplicate payment should still fail.
1103 nodes[0].node.timer_tick_occurred();
1104 check_send_rejected!();
1106 // However, after some time has passed (at least more than the one timer tick above), a
1107 // duplicate payment should go through, as ChannelManager should no longer have any remaining
1108 // references to the old payment data.
1109 for _ in 0..IDEMPOTENCY_TIMEOUT_TICKS {
1110 nodes[0].node.timer_tick_occurred();
1113 nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1114 check_added_monitors!(nodes[0], 1);
1115 pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1116 claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1120 fn abandoned_send_payment_idempotent() {
1121 // Tests that `send_payment` (and friends) allow duplicate PaymentIds immediately after the payment is abandoned.
1123 let chanmon_cfgs = create_chanmon_cfgs(2);
1124 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1125 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1126 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1128 create_announced_chan_between_nodes(&nodes, 0, 1).2;
1130 let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1131 let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1133 macro_rules! check_send_rejected {
1135 // If we try to resend a new payment with a different payment_hash but with the same
1136 // payment_id, it should be rejected.
1137 let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1139 Err(PaymentSendFailure::DuplicatePayment) => {},
1140 _ => panic!("Unexpected send result: {:?}", send_result),
1143 // Further, if we try to send a spontaneous payment with the same payment_id it should
1144 // also be rejected.
1145 let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1147 Err(PaymentSendFailure::DuplicatePayment) => {},
1148 _ => panic!("Unexpected send result: {:?}", send_result),
1153 check_send_rejected!();
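// Fail the payment back from nodes[1]; the PaymentId only becomes reusable once the failure has
// propagated back to nodes[0] and the payment has been abandoned.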
1155 nodes[1].node.fail_htlc_backwards(&first_payment_hash);
1156 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
1158 // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the PaymentId.
1160 for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1161 nodes[0].node.timer_tick_occurred();
1163 check_send_rejected!();
1165 pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
1167 // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
1168 // failed payment back.
1169 nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1170 check_added_monitors!(nodes[0], 1);
1171 pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1172 claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1175 #[derive(PartialEq)]
1176 enum InterceptTest {
1183 fn test_trivial_inflight_htlc_tracking(){
1184 // In this test, we test three scenarios:
1185 // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs
1186 // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs
1187 // (3) After we claim the payment sent in (2), InFlightHtlcs should return `None` for the query.
1188 let chanmon_cfgs = create_chanmon_cfgs(3);
1189 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1190 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1191 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1193 let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
1194 let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
1196 // Send and claim the payment. Inflight HTLCs should be empty.
1197 let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 500000);
1198 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
1199 check_added_monitors!(nodes[0], 1);
1200 pass_along_route(&nodes[0], &[&vec!(&nodes[1], &nodes[2])[..]], 500000, payment_hash, payment_secret);
1201 claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], payment_preimage);
1203 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1205 let mut node_0_per_peer_lock;
1206 let mut node_0_peer_state_lock;
1207 let mut node_1_per_peer_lock;
1208 let mut node_1_peer_state_lock;
1209 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1210 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1212 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1213 &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1214 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1215 channel_1.get_short_channel_id().unwrap()
1217 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1218 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1219 &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1220 channel_2.get_short_channel_id().unwrap()
1223 assert_eq!(chan_1_used_liquidity, None);
1224 assert_eq!(chan_2_used_liquidity, None);
1226 let pending_payments = nodes[0].node.list_recent_payments();
1227 assert_eq!(pending_payments.len(), 1);
1228 assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
1230 // Remove fulfilled payment
1231 for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1232 nodes[0].node.timer_tick_occurred();
1235 // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
1236 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
1238 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1240 let mut node_0_per_peer_lock;
1241 let mut node_0_peer_state_lock;
1242 let mut node_1_per_peer_lock;
1243 let mut node_1_peer_state_lock;
1244 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1245 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1247 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1248 &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1249 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1250 channel_1.get_short_channel_id().unwrap()
1252 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1253 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1254 &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1255 channel_2.get_short_channel_id().unwrap()
1258 // First hop accounts for expected 1000 msat fee
1259 assert_eq!(chan_1_used_liquidity, Some(501000));
1260 assert_eq!(chan_2_used_liquidity, Some(500000));
1262 let pending_payments = nodes[0].node.list_recent_payments();
1263 assert_eq!(pending_payments.len(), 1);
1264 assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
1266 // Now, let's claim the payment. This should result in the used liquidity queries returning `None`.
1267 claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1269 // Remove fulfilled payment
1270 for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1271 nodes[0].node.timer_tick_occurred();
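// With the second payment claimed and aged out of the recent-payments set, both channels should
// once again report no in-flight liquidity.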
1275 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1277 let mut node_0_per_peer_lock;
1278 let mut node_0_peer_state_lock;
1279 let mut node_1_per_peer_lock;
1280 let mut node_1_peer_state_lock;
1281 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1282 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1284 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1285 &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1286 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1287 channel_1.get_short_channel_id().unwrap()
1289 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1290 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1291 &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1292 channel_2.get_short_channel_id().unwrap()
1295 assert_eq!(chan_1_used_liquidity, None);
1296 assert_eq!(chan_2_used_liquidity, None);
1299 let pending_payments = nodes[0].node.list_recent_payments();
1300 assert_eq!(pending_payments.len(), 0);
1304 fn test_holding_cell_inflight_htlcs() {
1305 let chanmon_cfgs = create_chanmon_cfgs(2);
1306 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1307 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1308 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1309 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1311 let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1312 let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1314 // Queue up two payments - one will be delivered right away, one immediately goes into the
1315 // holding cell as nodes[0] is AwaitingRAA.
1317 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1318 check_added_monitors!(nodes[0], 1);
1319 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1320 check_added_monitors!(nodes[0], 0);
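// Both HTLCs should count towards the channel's in-flight amount, including the second one that is
// still sitting in the holding cell awaiting the RAA.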
1323 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1326 let mut node_0_per_peer_lock;
1327 let mut node_0_peer_state_lock;
1328 let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id);
1330 let used_liquidity = inflight_htlcs.used_liquidity_msat(
1331 &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1332 &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1333 channel.get_short_channel_id().unwrap()
1336 assert_eq!(used_liquidity, Some(2000000));
1339 // Clear pending events so test doesn't throw a "Had excess message on node..." error
1340 nodes[0].node.get_and_clear_pending_msg_events();
1344 fn intercepted_payment() {
1345 // Test that detecting an intercept scid on payment forward will signal LDK to generate an
1346 // intercept event, which the LSP can then use to either (a) open a JIT channel to forward the
1347 // payment or (b) fail the payment.
1348 do_test_intercepted_payment(InterceptTest::Forward);
1349 do_test_intercepted_payment(InterceptTest::Fail);
1350 // Make sure that intercepted payments will be automatically failed back if too many blocks pass.
1351 do_test_intercepted_payment(InterceptTest::Timeout);
1354 fn do_test_intercepted_payment(test: InterceptTest) {
1355 let chanmon_cfgs = create_chanmon_cfgs(3);
1356 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1358 let mut zero_conf_chan_config = test_default_channel_config();
1359 zero_conf_chan_config.manually_accept_inbound_channels = true;
1360 let mut intercept_forwards_config = test_default_channel_config();
1361 intercept_forwards_config.accept_intercept_htlcs = true;
1362 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]);
1364 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1365 let scorer = test_utils::TestScorer::new();
1366 let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
1368 let _ = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1370 let amt_msat = 100_000;
1371 let intercept_scid = nodes[1].node.get_intercept_scid();
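// Advertise the intercept SCID in a route hint so the sender routes the final hop through it,
// triggering HTLC interception at nodes[1].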
1372 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1373 .with_route_hints(vec![
1374 RouteHint(vec![RouteHintHop {
1375 src_node_id: nodes[1].node.get_our_node_id(),
1376 short_channel_id: intercept_scid,
1379 proportional_millionths: 0,
1381 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
1382 htlc_minimum_msat: None,
1383 htlc_maximum_msat: None,
1386 .with_features(nodes[2].node.invoice_features());
1387 let route_params = RouteParameters {
1389 final_value_msat: amt_msat,
1390 final_cltv_expiry_delta: TEST_FINAL_CLTV,
1392 let route = get_route(
1393 &nodes[0].node.get_our_node_id(), &route_params.payment_params,
1394 &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
1395 route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
1398 let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
1399 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
1400 let payment_event = {
1402 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
1403 assert_eq!(added_monitors.len(), 1);
1404 added_monitors.clear();
1406 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1407 assert_eq!(events.len(), 1);
1408 SendEvent::from_event(events.remove(0))
1410 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1411 commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
1413 // Check that we generate the HTLCIntercepted event when an intercept forward is detected.
1414 let events = nodes[1].node.get_and_clear_pending_events();
1415 assert_eq!(events.len(), 1);
1416 let (intercept_id, expected_outbound_amount_msat) = match events[0] {
1417 crate::util::events::Event::HTLCIntercepted {
1418 intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
1420 assert_eq!(pmt_hash, payment_hash);
1421 assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees());
1422 assert_eq!(short_channel_id, intercept_scid);
1423 (intercept_id, expected_outbound_amount_msat)
1428 // Check for unknown channel id error.
1429 let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1430 assert_eq!(unknown_chan_id_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
1432 if test == InterceptTest::Fail {
1433 // Ensure we can fail the intercepted payment back.
1434 nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
1435 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
1436 nodes[1].node.process_pending_htlc_forwards();
1437 let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1438 check_added_monitors!(&nodes[1], 1);
1439 assert!(update_fail.update_fail_htlcs.len() == 1);
1440 let fail_msg = update_fail.update_fail_htlcs[0].clone();
1441 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
1442 commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
1444 // Ensure the payment fails with the expected error.
1445 let fail_conditions = PaymentFailedConditions::new()
1446 .blamed_scid(intercept_scid)
1447 .blamed_chan_closed(true)
1448 .expected_htlc_error_data(0x4000 | 10, &[]);
1449 expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
1450 } else if test == InterceptTest::Forward {
1451 // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet.
1452 let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
1453 let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1454 assert_eq!(unusable_chan_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(temp_chan_id)) });
1455 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
1457 // Open the just-in-time channel so the payment can then be forwarded.
1458 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1460 // Finally, forward the intercepted payment through and claim it.
1461 nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap();
1462 expect_pending_htlcs_forwardable!(nodes[1]);
1464 let payment_event = {
1466 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1467 assert_eq!(added_monitors.len(), 1);
1468 added_monitors.clear();
1470 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1471 assert_eq!(events.len(), 1);
1472 SendEvent::from_event(events.remove(0))
1474 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
1475 commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true);
1476 expect_pending_htlcs_forwardable!(nodes[2]);
1478 let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
1479 expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
1480 do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
1481 let events = nodes[0].node.get_and_clear_pending_events();
1482 assert_eq!(events.len(), 2);
1484 Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
1485 assert_eq!(payment_preimage, *ev_preimage);
1486 assert_eq!(payment_hash, *ev_hash);
1487 assert_eq!(fee_paid_msat, &Some(1000));
1489 _ => panic!("Unexpected event")
1492 Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
1493 assert_eq!(hash, Some(payment_hash));
1495 _ => panic!("Unexpected event")
1497 } else if test == InterceptTest::Timeout {
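// Connect blocks until the intercepted HTLC's forward deadline passes; nodes[1] should then
// automatically fail it back to nodes[0].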
1498 let mut block = Block {
1499 header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
1502 connect_block(&nodes[0], &block);
1503 connect_block(&nodes[1], &block);
1504 for _ in 0..TEST_FINAL_CLTV {
1505 block.header.prev_blockhash = block.block_hash();
1506 connect_block(&nodes[0], &block);
1507 connect_block(&nodes[1], &block);
1509 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::InvalidForward { requested_forward_scid: intercept_scid }]);
1510 check_added_monitors!(nodes[1], 1);
1511 let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1512 assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
1513 assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
1514 assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
1515 assert!(htlc_timeout_updates.update_fee.is_none());
1517 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
1518 commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
1519 expect_payment_failed!(nodes[0], payment_hash, false, 0x2000 | 2, []);
1521 // Check for unknown intercept id error.
1522 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1523 let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1524 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1525 let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err();
1526 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1530 #[derive(PartialEq)]
1541 fn automatic_retries() {
1542 do_automatic_retries(AutoRetry::Success);
1543 do_automatic_retries(AutoRetry::Spontaneous);
1544 do_automatic_retries(AutoRetry::FailAttempts);
1545 do_automatic_retries(AutoRetry::FailTimeout);
1546 do_automatic_retries(AutoRetry::FailOnRestart);
1547 do_automatic_retries(AutoRetry::FailOnRetry);
1549 fn do_automatic_retries(test: AutoRetry) {
1550 // Test basic automatic payment retries in ChannelManager. See individual `test` variant comments for more info.
1552 let chanmon_cfgs = create_chanmon_cfgs(3);
1553 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1554 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1557 let new_chain_monitor;
1558 let node_0_deserialized;
1560 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1561 let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1562 let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
1564 // Marshal data to send the payment
1565 #[cfg(feature = "std")]
1566 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1567 #[cfg(not(feature = "std"))]
1568 let payment_expiry_secs = 60 * 60;
1569 let amt_msat = 1000;
1570 let mut invoice_features = InvoiceFeatures::empty();
1571 invoice_features.set_variable_length_onion_required();
1572 invoice_features.set_payment_secret_required();
1573 invoice_features.set_basic_mpp_optional();
1574 let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1575 .with_expiry_time(payment_expiry_secs as u64)
1576 .with_features(invoice_features);
1577 let route_params = RouteParameters {
1579 final_value_msat: amt_msat,
1580 final_cltv_expiry_delta: TEST_FINAL_CLTV,
1582 let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
1584 macro_rules! pass_failed_attempt_with_retry_along_path {
1585 ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => {
1586 // Send a payment attempt that fails due to lack of liquidity on the second hop
1587 check_added_monitors!(nodes[0], 1);
1588 let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1589 let mut update_add = update_0.update_add_htlcs[0].clone();
1590 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
1591 commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
1592 expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1593 nodes[1].node.process_pending_htlc_forwards();
1594 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
1595 vec![HTLCDestination::NextHopChannel {
1596 node_id: Some(nodes[2].node.get_our_node_id()),
1597 channel_id: $failing_channel_id,
1599 nodes[1].node.process_pending_htlc_forwards();
1600 let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1601 check_added_monitors!(&nodes[1], 1);
1602 assert!(update_1.update_fail_htlcs.len() == 1);
1603 let fail_msg = update_1.update_fail_htlcs[0].clone();
1604 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
1605 commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
1607 // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry
1608 let mut events = nodes[0].node.get_and_clear_pending_events();
1609 assert_eq!(events.len(), 2);
1611 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
1612 assert_eq!(payment_hash, ev_payment_hash);
1613 assert_eq!(payment_failed_permanently, false);
1615 _ => panic!("Unexpected event"),
1617 if $expect_pending_htlcs_forwardable {
1619 Event::PendingHTLCsForwardable { .. } => {},
1620 _ => panic!("Unexpected event"),
1624 Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => {
1625 assert_eq!(payment_hash, ev_payment_hash);
1627 _ => panic!("Unexpected event"),
1633 if test == AutoRetry::Success {
1634 // Test that we can succeed on the first retry.
1635 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1636 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1638 // Open a new channel with liquidity on the second hop so we can find a route for the retry
1639 // attempt, since the initial second hop channel will be excluded from pathfinding
1640 create_announced_chan_between_nodes(&nodes, 1, 2);
1642 // We retry payments in `process_pending_htlc_forwards`
1643 nodes[0].node.process_pending_htlc_forwards();
1644 check_added_monitors!(nodes[0], 1);
1645 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1646 assert_eq!(msg_events.len(), 1);
1647 pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
1648 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
1649 } else if test == AutoRetry::Spontaneous {
1650 nodes[0].node.send_spontaneous_payment_with_retry(Some(payment_preimage), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1651 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1653 // Open a new channel with liquidity on the second hop so we can find a route for the retry
1654 // attempt, since the initial second hop channel will be excluded from pathfinding
1655 create_announced_chan_between_nodes(&nodes, 1, 2);
1657 // We retry payments in `process_pending_htlc_forwards`
1658 nodes[0].node.process_pending_htlc_forwards();
1659 check_added_monitors!(nodes[0], 1);
1660 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1661 assert_eq!(msg_events.len(), 1);
1662 pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, None, msg_events.pop().unwrap(), true, Some(payment_preimage));
1663 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
1664 } else if test == AutoRetry::FailAttempts {
1665 // Ensure ChannelManager will not retry a payment if it has run out of payment attempts.
1666 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1667 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1669 // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
1670 // the retry attempt, since the initial second hop channel will be excluded from pathfinding
1671 let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
1673 // We retry payments in `process_pending_htlc_forwards`
1674 nodes[0].node.process_pending_htlc_forwards();
1675 pass_failed_attempt_with_retry_along_path!(channel_id_3, false);
1677 // Ensure we won't retry a second time.
1678 nodes[0].node.process_pending_htlc_forwards();
1679 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1680 assert_eq!(msg_events.len(), 0);
1681 } else if test == AutoRetry::FailTimeout {
1682 #[cfg(not(feature = "no-std"))] {
1683 // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
1684 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
1685 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1687 // Advance the time so the second attempt fails due to timeout.
1688 SinceEpoch::advance(Duration::from_secs(61));
1690 // Make sure we don't retry again.
1691 nodes[0].node.process_pending_htlc_forwards();
1692 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1693 assert_eq!(msg_events.len(), 0);
1695 let mut events = nodes[0].node.get_and_clear_pending_events();
1696 assert_eq!(events.len(), 1);
1698 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1699 assert_eq!(payment_hash, *ev_payment_hash);
1700 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1702 _ => panic!("Unexpected event"),
1705 } else if test == AutoRetry::FailOnRestart {
1706 // Ensure ChannelManager will not retry a payment after restart, even if there were retry
1707 // attempts remaining prior to restart.
1708 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
1709 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1711 // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
1712 // the retry attempt, since the initial second hop channel will be excluded from pathfinding
1713 let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
1715 // Ensure the first retry attempt fails, with 1 retry attempt remaining
1716 nodes[0].node.process_pending_htlc_forwards();
1717 pass_failed_attempt_with_retry_along_path!(channel_id_3, true);
1719 // Restart the node and ensure that ChannelManager does not use its remaining retry attempt
1720 let node_encoded = nodes[0].node.encode();
1721 let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode();
1722 reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized);
1724 let mut events = nodes[0].node.get_and_clear_pending_events();
1725 expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true);
1726 // Make sure we don't retry again.
1727 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1728 assert_eq!(msg_events.len(), 0);
1730 let mut events = nodes[0].node.get_and_clear_pending_events();
1731 assert_eq!(events.len(), 1);
1733 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1734 assert_eq!(payment_hash, *ev_payment_hash);
1735 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1737 _ => panic!("Unexpected event"),
1739 } else if test == AutoRetry::FailOnRetry {
1740 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1741 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1743 // We retry payments in `process_pending_htlc_forwards`. Since the only second-hop channel was
1744 // excluded from pathfinding by the previous failure, the retry should fail to find a route.
1745 nodes[0].node.process_pending_htlc_forwards();
1746 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1747 assert_eq!(msg_events.len(), 0);
1749 let mut events = nodes[0].node.get_and_clear_pending_events();
1750 assert_eq!(events.len(), 1);
1752 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1753 assert_eq!(payment_hash, *ev_payment_hash);
1754 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1756 _ => panic!("Unexpected event"),
1762 fn auto_retry_partial_failure() {
1763 // Test that we retry appropriately when the initial send partially fails, and again when the retry itself partially fails.
1764 let chanmon_cfgs = create_chanmon_cfgs(2);
1765 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1766 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1767 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1769 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1770 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1771 let chan_3_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1773 // Marshal data to send the payment
1774 let amt_msat = 20_000;
1775 let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
1776 #[cfg(feature = "std")]
1777 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1778 #[cfg(not(feature = "std"))]
1779 let payment_expiry_secs = 60 * 60;
1780 let mut invoice_features = InvoiceFeatures::empty();
1781 invoice_features.set_variable_length_onion_required();
1782 invoice_features.set_payment_secret_required();
1783 invoice_features.set_basic_mpp_optional();
1784 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
1785 .with_expiry_time(payment_expiry_secs as u64)
1786 .with_features(invoice_features);
1787 let route_params = RouteParameters {
1789 final_value_msat: amt_msat,
1790 final_cltv_expiry_delta: TEST_FINAL_CLTV,
1793 // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
1794 // second (for the initial send path2 over chan_2) fails.
1795 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1796 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1797 // Ensure third monitor update (for the retry1's path1 over chan_1) succeeds, but the fourth (for
1798 // the retry1's path2 over chan_3) fails, and monitor updates succeed after that.
1799 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1800 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1801 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1803 // Configure the initial send, retry1 and retry2's paths.
1804 let send_route = Route {
1807 pubkey: nodes[1].node.get_our_node_id(),
1808 node_features: nodes[1].node.node_features(),
1809 short_channel_id: chan_1_id,
1810 channel_features: nodes[1].node.channel_features(),
1811 fee_msat: amt_msat / 2,
1812 cltv_expiry_delta: 100,
1815 pubkey: nodes[1].node.get_our_node_id(),
1816 node_features: nodes[1].node.node_features(),
1817 short_channel_id: chan_2_id,
1818 channel_features: nodes[1].node.channel_features(),
1819 fee_msat: amt_msat / 2,
1820 cltv_expiry_delta: 100,
1823 payment_params: Some(route_params.payment_params.clone()),
1825 let retry_1_route = Route {
1828 pubkey: nodes[1].node.get_our_node_id(),
1829 node_features: nodes[1].node.node_features(),
1830 short_channel_id: chan_1_id,
1831 channel_features: nodes[1].node.channel_features(),
1832 fee_msat: amt_msat / 4,
1833 cltv_expiry_delta: 100,
1836 pubkey: nodes[1].node.get_our_node_id(),
1837 node_features: nodes[1].node.node_features(),
1838 short_channel_id: chan_3_id,
1839 channel_features: nodes[1].node.channel_features(),
1840 fee_msat: amt_msat / 4,
1841 cltv_expiry_delta: 100,
1844 payment_params: Some(route_params.payment_params.clone()),
1846 let retry_2_route = Route {
1849 pubkey: nodes[1].node.get_our_node_id(),
1850 node_features: nodes[1].node.node_features(),
1851 short_channel_id: chan_1_id,
1852 channel_features: nodes[1].node.channel_features(),
1853 fee_msat: amt_msat / 4,
1854 cltv_expiry_delta: 100,
1857 payment_params: Some(route_params.payment_params.clone()),
1859 nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
1860 nodes[0].router.expect_find_route(RouteParameters {
1861 payment_params: route_params.payment_params.clone(),
1862 final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV
1863 }, Ok(retry_1_route));
1864 nodes[0].router.expect_find_route(RouteParameters {
1865 payment_params: route_params.payment_params.clone(),
1866 final_value_msat: amt_msat / 4, final_cltv_expiry_delta: TEST_FINAL_CLTV
1867 }, Ok(retry_2_route));
1869 // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
1870 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
1871 let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
1872 assert_eq!(closed_chan_events.len(), 2);
1873 match closed_chan_events[0] {
1874 Event::ChannelClosed { .. } => {},
1875 _ => panic!("Unexpected event"),
1877 match closed_chan_events[1] {
1878 Event::ChannelClosed { .. } => {},
1879 _ => panic!("Unexpected event"),
1882 // Pass the first part of the payment along the path.
1883 check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
1884 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1886 // The first message is the initial update_add; the remaining messages are channel update broadcasts and
1887 // error messages for the permanently-failed channels
1888 assert_eq!(msg_events.len(), 5);
1889 let mut payment_event = SendEvent::from_event(msg_events.remove(0));
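// Deliver the surviving initial-send HTLC and walk the commitment dance by hand; the two retried
// HTLCs are delivered in a second round once nodes[0] has processed the first revoke_and_ack.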
1891 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1892 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1893 check_added_monitors!(nodes[1], 1);
1894 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1896 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
1897 check_added_monitors!(nodes[0], 1);
1898 let as_second_htlc_updates = SendEvent::from_node(&nodes[0]);
1900 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
1901 check_added_monitors!(nodes[0], 1);
1902 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1904 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
1905 check_added_monitors!(nodes[1], 1);
1907 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]);
1908 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]);
1909 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg);
1910 check_added_monitors!(nodes[1], 1);
1911 let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1913 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
1914 check_added_monitors!(nodes[0], 1);
1916 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
1917 check_added_monitors!(nodes[0], 1);
1918 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1920 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
1921 check_added_monitors!(nodes[1], 1);
1923 expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1924 nodes[1].node.process_pending_htlc_forwards();
1925 expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat);
1926 nodes[1].node.claim_funds(payment_preimage);
1927 expect_payment_claimed!(nodes[1], payment_hash, amt_msat);
1928 let bs_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1929 assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1);
1931 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]);
1932 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed);
1933 check_added_monitors!(nodes[0], 1);
1934 let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1936 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
1937 check_added_monitors!(nodes[1], 4);
1938 let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1940 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
1941 check_added_monitors!(nodes[1], 1);
1942 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1944 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
1945 check_added_monitors!(nodes[0], 1);
1947 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]);
1948 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]);
1949 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed);
1950 check_added_monitors!(nodes[0], 1);
1951 let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1953 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_fourth_raa);
1954 check_added_monitors!(nodes[1], 1);
1956 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
1957 check_added_monitors!(nodes[1], 1);
1958 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1960 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
1961 check_added_monitors!(nodes[0], 1);
1962 expect_payment_sent!(nodes[0], payment_preimage);
1966 fn auto_retry_zero_attempts_send_error() {
1967 let chanmon_cfgs = create_chanmon_cfgs(2);
1968 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1969 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1970 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1972 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1973 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1975 // Marshal data to send the payment
1976 let amt_msat = 20_000;
1977 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
1978 #[cfg(feature = "std")]
1979 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1980 #[cfg(not(feature = "std"))]
1981 let payment_expiry_secs = 60 * 60;
1982 let mut invoice_features = InvoiceFeatures::empty();
1983 invoice_features.set_variable_length_onion_required();
1984 invoice_features.set_payment_secret_required();
1985 invoice_features.set_basic_mpp_optional();
1986 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
1987 .with_expiry_time(payment_expiry_secs as u64)
1988 .with_features(invoice_features);
1989 let route_params = RouteParameters {
1991 final_value_msat: amt_msat,
1992 final_cltv_expiry_delta: TEST_FINAL_CLTV,
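// Force the initial monitor update to fail permanently; with Retry::Attempts(0) the send should
// fail outright rather than being retried.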
1995 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1996 let err = nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap_err();
1997 if let PaymentSendFailure::AllFailedResendSafe(_) = err {
1998 } else { panic!("Unexpected error"); }
1999 assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
2000 assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 1); // channel close event
2001 check_added_monitors!(nodes[0], 2);
2005 fn fails_paying_after_rejected_by_payee() {
2006 let chanmon_cfgs = create_chanmon_cfgs(2);
2007 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2008 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2009 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2011 create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2013 // Marshal data to send the payment
2014 let amt_msat = 20_000;
2015 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2016 #[cfg(feature = "std")]
2017 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2018 #[cfg(not(feature = "std"))]
2019 let payment_expiry_secs = 60 * 60;
2020 let mut invoice_features = InvoiceFeatures::empty();
2021 invoice_features.set_variable_length_onion_required();
2022 invoice_features.set_payment_secret_required();
2023 invoice_features.set_basic_mpp_optional();
2024 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2025 .with_expiry_time(payment_expiry_secs as u64)
2026 .with_features(invoice_features);
2027 let route_params = RouteParameters {
2029 final_value_msat: amt_msat,
2030 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2033 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2034 check_added_monitors!(nodes[0], 1);
2035 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2036 assert_eq!(events.len(), 1);
2037 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
2038 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
2039 check_added_monitors!(nodes[1], 0);
2040 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
2041 expect_pending_htlcs_forwardable!(nodes[1]);
2042 expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat);
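// The payee rejects the payment. A failure from the final node is treated as permanent, so nodes[0]
// should give up rather than consuming its remaining retry attempt.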
2044 nodes[1].node.fail_htlc_backwards(&payment_hash);
2045 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]);
2046 pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash);
2050 fn retry_multi_path_single_failed_payment() {
2051 // Tests that we can/will retry after a single path of an MPP payment failed immediately
2052 let chanmon_cfgs = create_chanmon_cfgs(2);
2053 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2054 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2055 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2057 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2058 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2060 let amt_msat = 100_010_000;
2062 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2063 #[cfg(feature = "std")]
2064 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2065 #[cfg(not(feature = "std"))]
2066 let payment_expiry_secs = 60 * 60;
2067 let mut invoice_features = InvoiceFeatures::empty();
2068 invoice_features.set_variable_length_onion_required();
2069 invoice_features.set_payment_secret_required();
2070 invoice_features.set_basic_mpp_optional();
2071 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2072 .with_expiry_time(payment_expiry_secs as u64)
2073 .with_features(invoice_features);
2074 let route_params = RouteParameters {
2075 payment_params: payment_params.clone(),
2076 final_value_msat: amt_msat,
2077 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2080 let chans = nodes[0].node.list_usable_channels();
2081 let mut route = Route {
2084 pubkey: nodes[1].node.get_our_node_id(),
2085 node_features: nodes[1].node.node_features(),
2086 short_channel_id: chans[0].short_channel_id.unwrap(),
2087 channel_features: nodes[1].node.channel_features(),
2089 cltv_expiry_delta: 100,
2092 pubkey: nodes[1].node.get_our_node_id(),
2093 node_features: nodes[1].node.node_features(),
2094 short_channel_id: chans[1].short_channel_id.unwrap(),
2095 channel_features: nodes[1].node.channel_features(),
2096 fee_msat: 100_000_001, // One msat more than our default max-HTLC-value of 10% of the channel value
2097 cltv_expiry_delta: 100,
2100 payment_params: Some(payment_params),
2102 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2103 // On retry, split the payment across both channels.
2104 route.paths[0][0].fee_msat = 50_000_001;
2105 route.paths[1][0].fee_msat = 50_000_000;
2106 nodes[0].router.expect_find_route(RouteParameters {
2107 payment_params: route.payment_params.clone().unwrap(),
2108 // Note that the second request here asks for the amount we originally failed to send, rather than
2109 // the amount remaining on the full payment; ideally this behavior would be changed.
2110 final_value_msat: 100_000_001, final_cltv_expiry_delta: TEST_FINAL_CLTV
2111 }, Ok(route.clone()));
2114 let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
2115 // The initial send attempt, 2 paths
2116 scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
2117 scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
2118 // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in.
2119 scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown });
2120 scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
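// Send the payment: the small first path goes out normally, while the over-limit second path fails
// before leaving the node and is retried, split across both channels.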
2123 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2124 let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2125 assert_eq!(htlc_msgs.len(), 2);
2126 check_added_monitors!(nodes[0], 2);
2130 fn immediate_retry_on_failure() {
2131 // Tests that we can/will retry immediately after a failure
2132 let chanmon_cfgs = create_chanmon_cfgs(2);
2133 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2134 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2135 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2137 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2138 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2140 let amt_msat = 100_000_001;
2141 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2142 #[cfg(feature = "std")]
2143 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2144 #[cfg(not(feature = "std"))]
2145 let payment_expiry_secs = 60 * 60;
2146 let mut invoice_features = InvoiceFeatures::empty();
2147 invoice_features.set_variable_length_onion_required();
2148 invoice_features.set_payment_secret_required();
2149 invoice_features.set_basic_mpp_optional();
2150 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2151 .with_expiry_time(payment_expiry_secs as u64)
2152 .with_features(invoice_features);
2153 let route_params = RouteParameters {
2155 final_value_msat: amt_msat,
2156 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2159 let chans = nodes[0].node.list_usable_channels();
2160 let mut route = Route {
2163 pubkey: nodes[1].node.get_our_node_id(),
2164 node_features: nodes[1].node.node_features(),
2165 short_channel_id: chans[0].short_channel_id.unwrap(),
2166 channel_features: nodes[1].node.channel_features(),
2167 fee_msat: 100_000_001, // One msat more than our default max-HTLC-value of 10% of the channel value
2168 cltv_expiry_delta: 100,
2171 payment_params: Some(PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)),
2173 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2174 // On retry, split the payment across both channels.
2175 route.paths.push(route.paths[0].clone());
2176 route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap();
2177 route.paths[0][0].fee_msat = 50_000_000;
2178 route.paths[1][0].fee_msat = 50_000_001;
2179 nodes[0].router.expect_find_route(RouteParameters {
2180 payment_params: route_params.payment_params.clone(),
2181 final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV
2182 }, Ok(route.clone()));
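// The single over-limit path fails on the initial send attempt and is immediately retried, split
// across the two channels.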
2184 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2185 let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2186 assert_eq!(htlc_msgs.len(), 2);
2187 check_added_monitors!(nodes[0], 2);
2191 fn no_extra_retries_on_back_to_back_fail() {
2192 // In a previous release, we had a race where we could exceed the payment retry count if we
2193 // get two failures in a row with the second having `all_paths_failed` set.
2194 // Generally, when we give up trying to retry a payment, we don't know for sure what the
2195 // current state of the ChannelManager event queue is. Specifically, we cannot be sure that
2196 // there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events
2197 // pending which we will see later. Thus, when we previously removed the retry tracking map
2198 // entry after an `all_paths_failed` `PaymentPathFailed` event, we may have dropped the
2199 // retry entry even though more events for the same payment were still pending. This led to
2200 // us retrying a payment again even though we'd already given up on it.
2202 // We now have a separate event - `PaymentFailed` - which indicates no HTLCs remain and which
2203 // is used to remove the payment retry counter entries instead. This tests for the specific
2204 // excess-retry case while also testing `PaymentFailed` generation.
2206 let chanmon_cfgs = create_chanmon_cfgs(3);
2207 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2208 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2209 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2211 let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2212 let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
2214 let amt_msat = 200_000_000;
2215 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2216 #[cfg(feature = "std")]
2217 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2218 #[cfg(not(feature = "std"))]
2219 let payment_expiry_secs = 60 * 60;
2220 let mut invoice_features = InvoiceFeatures::empty();
2221 invoice_features.set_variable_length_onion_required();
2222 invoice_features.set_payment_secret_required();
2223 invoice_features.set_basic_mpp_optional();
2224 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2225 .with_expiry_time(payment_expiry_secs as u64)
2226 .with_features(invoice_features);
2227 let route_params = RouteParameters {
2229 final_value_msat: amt_msat,
2230 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2233 let mut route = Route {
2236 pubkey: nodes[1].node.get_our_node_id(),
2237 node_features: nodes[1].node.node_features(),
2238 short_channel_id: chan_1_scid,
2239 channel_features: nodes[1].node.channel_features(),
2240 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2241 cltv_expiry_delta: 100,
2243 pubkey: nodes[2].node.get_our_node_id(),
2244 node_features: nodes[2].node.node_features(),
2245 short_channel_id: chan_2_scid,
2246 channel_features: nodes[2].node.channel_features(),
2247 fee_msat: 100_000_000,
2248 cltv_expiry_delta: 100,
2251 pubkey: nodes[1].node.get_our_node_id(),
2252 node_features: nodes[1].node.node_features(),
2253 short_channel_id: chan_1_scid,
2254 channel_features: nodes[1].node.channel_features(),
2255 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2256 cltv_expiry_delta: 100,
2258 pubkey: nodes[2].node.get_our_node_id(),
2259 node_features: nodes[2].node.node_features(),
2260 short_channel_id: chan_2_scid,
2261 channel_features: nodes[2].node.channel_features(),
2262 fee_msat: 100_000_000,
2263 cltv_expiry_delta: 100,
2266 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2268 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2269 let mut second_payment_params = route_params.payment_params.clone();
2270 second_payment_params.previously_failed_channels = vec![chan_2_scid, chan_2_scid];
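// Both paths are expected to fail forwarding over chan_2 (nodes[1] is paid no fee), so its scid
// appears twice in the failed-channels list the retry route request should carry.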
2271 // On retry, we'll only return one path
2272 route.paths.remove(1);
2273 route.paths[0][1].fee_msat = amt_msat;
2274 nodes[0].router.expect_find_route(RouteParameters {
2275 payment_params: second_payment_params,
2276 final_value_msat: amt_msat, final_cltv_expiry_delta: TEST_FINAL_CLTV,
2277 }, Ok(route.clone()));
2279 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2280 let htlc_updates = SendEvent::from_node(&nodes[0]);
2281 check_added_monitors!(nodes[0], 1);
2282 assert_eq!(htlc_updates.msgs.len(), 1);
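// Deliver the first HTLC, then hand-roll the commitment dances for both HTLCs so that the two
// failures arrive back-to-back on nodes[0].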
2284 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
2285 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
2286 check_added_monitors!(nodes[1], 1);
2287 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2289 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2290 check_added_monitors!(nodes[0], 1);
2291 let second_htlc_updates = SendEvent::from_node(&nodes[0]);
2293 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2294 check_added_monitors!(nodes[0], 1);
2295 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2297 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
2298 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
2299 check_added_monitors!(nodes[1], 1);
2300 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2302 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2303 check_added_monitors!(nodes[1], 1);
2304 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2306 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2307 check_added_monitors!(nodes[0], 1);
2309 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2310 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
2311 check_added_monitors!(nodes[0], 1);
2312 let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2314 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2315 check_added_monitors!(nodes[1], 1);
2316 let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2318 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
2319 check_added_monitors!(nodes[1], 1);
2320 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2322 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]);
2323 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed);
2324 check_added_monitors!(nodes[0], 1);
2326 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2327 check_added_monitors!(nodes[0], 1);
2328 let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2330 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
2331 check_added_monitors!(nodes[1], 1);
2332 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
2333 check_added_monitors!(nodes[1], 1);
2334 let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2336 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_fourth_raa);
2337 check_added_monitors!(nodes[0], 1);
2339 // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two
2340 // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second with it set.
2343 // Previously, we retried payments in an event consumer, which would retry each
2344 // `PaymentPathFailed` individually. In that setup, we would retry the payment in response to
2345 // the first `PaymentPathFailed`, then see the second `PaymentPathFailed` with
2346 // `all_paths_failed` set and assume the payment had completely failed. We ultimately fixed it
2347 // by adding the `PaymentFailed` event.
2349 // Because we now retry payments as a batch, we simply return a single-path route in the
2350 // second, batched, request, have that fail, and ensure the payment was abandoned.
2351 let mut events = nodes[0].node.get_and_clear_pending_events();
2352 assert_eq!(events.len(), 3);
2354 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
2355 assert_eq!(payment_hash, ev_payment_hash);
2356 assert_eq!(payment_failed_permanently, false);
2358 _ => panic!("Unexpected event"),
2361 Event::PendingHTLCsForwardable { .. } => {},
2362 _ => panic!("Unexpected event"),
2365 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
2366 assert_eq!(payment_hash, ev_payment_hash);
2367 assert_eq!(payment_failed_permanently, false);
2369 _ => panic!("Unexpected event"),
2372 nodes[0].node.process_pending_htlc_forwards();
2373 let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
2374 check_added_monitors!(nodes[0], 1);
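// The single allowed retry goes out as one HTLC over the one-path route queued above.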
2376 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
2377 commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
2378 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2379 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2380 commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
2382 let mut events = nodes[0].node.get_and_clear_pending_events();
2383 assert_eq!(events.len(), 2);
2385 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
2386 assert_eq!(payment_hash, ev_payment_hash);
2387 assert_eq!(payment_failed_permanently, false);
2389 _ => panic!("Unexpected event"),
2392 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
2393 assert_eq!(payment_hash, *ev_payment_hash);
2394 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
2396 _ => panic!("Unexpected event"),
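// Seeing `PaymentFailed` here (rather than another retry) confirms the payment was abandoned once
// its retry budget was exhausted.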
2401 fn test_simple_partial_retry() {
2402 // In the first version of the in-`ChannelManager` payment retries, retries were sent for the
2403 // full amount of the payment, rather than only the missing amount. Here we simply test for
2404 // this by sending a payment with two parts, failing one, and retrying the second. Note that
2405 // `TestRouter` will check that the `RouteParameters` (which contain the amount) matches the amount we actually need to retry.
2407 let chanmon_cfgs = create_chanmon_cfgs(3);
2408 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2409 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2410 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2412 let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2413 let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
2415 let amt_msat = 200_000_000;
2416 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
2417 #[cfg(feature = "std")]
2418 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2419 #[cfg(not(feature = "std"))]
2420 let payment_expiry_secs = 60 * 60;
2421 let mut invoice_features = InvoiceFeatures::empty();
2422 invoice_features.set_variable_length_onion_required();
2423 invoice_features.set_payment_secret_required();
2424 invoice_features.set_basic_mpp_optional();
2425 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2426 .with_expiry_time(payment_expiry_secs as u64)
2427 .with_features(invoice_features);
2428 let route_params = RouteParameters {
2430 final_value_msat: amt_msat,
2431 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2434 let mut route = Route {
2437 pubkey: nodes[1].node.get_our_node_id(),
2438 node_features: nodes[1].node.node_features(),
2439 short_channel_id: chan_1_scid,
2440 channel_features: nodes[1].node.channel_features(),
2441 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2442 cltv_expiry_delta: 100,
2444 pubkey: nodes[2].node.get_our_node_id(),
2445 node_features: nodes[2].node.node_features(),
2446 short_channel_id: chan_2_scid,
2447 channel_features: nodes[2].node.channel_features(),
2448 fee_msat: 100_000_000,
2449 cltv_expiry_delta: 100,
2452 pubkey: nodes[1].node.get_our_node_id(),
2453 node_features: nodes[1].node.node_features(),
2454 short_channel_id: chan_1_scid,
2455 channel_features: nodes[1].node.channel_features(),
2457 cltv_expiry_delta: 100,
2459 pubkey: nodes[2].node.get_our_node_id(),
2460 node_features: nodes[2].node.node_features(),
2461 short_channel_id: chan_2_scid,
2462 channel_features: nodes[2].node.channel_features(),
2463 fee_msat: 100_000_000,
2464 cltv_expiry_delta: 100,
2467 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2469 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2470 let mut second_payment_params = route_params.payment_params.clone();
2471 second_payment_params.previously_failed_channels = vec![chan_2_scid];
2472 // On retry, we'll only be asked for one path, covering the missing 100k sats (half the payment)
2473 route.paths.remove(0);
2474 nodes[0].router.expect_find_route(RouteParameters {
2475 payment_params: second_payment_params,
2476 final_value_msat: amt_msat / 2, final_cltv_expiry_delta: TEST_FINAL_CLTV,
2477 }, Ok(route.clone()));
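// The key check: the retry's `RouteParameters` carry only the missing half of the payment, which
// `TestRouter` verifies against this expectation.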
2479 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2480 let htlc_updates = SendEvent::from_node(&nodes[0]);
2481 check_added_monitors!(nodes[0], 1);
2482 assert_eq!(htlc_updates.msgs.len(), 1);
2484 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
2485 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
2486 check_added_monitors!(nodes[1], 1);
2487 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2489 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2490 check_added_monitors!(nodes[0], 1);
2491 let second_htlc_updates = SendEvent::from_node(&nodes[0]);
2493 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2494 check_added_monitors!(nodes[0], 1);
2495 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2497 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
2498 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
2499 check_added_monitors!(nodes[1], 1);
2500 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2502 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2503 check_added_monitors!(nodes[1], 1);
2504 let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2506 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2507 check_added_monitors!(nodes[0], 1);
2509 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2510 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
2511 check_added_monitors!(nodes[0], 1);
2512 let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2514 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2515 check_added_monitors!(nodes[1], 1);
2517 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
2518 check_added_monitors!(nodes[1], 1);
2520 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2522 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2523 check_added_monitors!(nodes[0], 1);
2525 let mut events = nodes[0].node.get_and_clear_pending_events();
2526 assert_eq!(events.len(), 2);
2528 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => {
2529 assert_eq!(payment_hash, ev_payment_hash);
2530 assert_eq!(payment_failed_permanently, false);
2532 _ => panic!("Unexpected event"),
2535 Event::PendingHTLCsForwardable { .. } => {},
2536 _ => panic!("Unexpected event"),
2539 nodes[0].node.process_pending_htlc_forwards();
2540 let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
2541 check_added_monitors!(nodes[0], 1);
2543 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
2544 commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
2546 expect_pending_htlcs_forwardable!(nodes[1]);
2547 check_added_monitors!(nodes[1], 1);
2549 let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
2550 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
2551 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
2552 commitment_signed_dance!(nodes[2], nodes[1], &bs_forward_update.commitment_signed, false);
2554 expect_pending_htlcs_forwardable!(nodes[2]);
2555 expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat);
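// nodes[2] receives both parts - the one that succeeded on the first attempt and the retried one -
// and can claim the full amount.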
2559 #[cfg(feature = "std")]
2560 fn test_threaded_payment_retries() {
2561 // In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
2562 // a single thread, and we would happily let multiple threads run retries at the same time. Because
2563 // retries are done by first calculating the amount we need to retry, then dropping the
2564 // relevant lock, then actually sending, multiple threads could end up retrying the same
2565 // amount at the same time, overpaying our original HTLC!
2566 let chanmon_cfgs = create_chanmon_cfgs(4);
2567 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2568 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2569 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2571 // There is one mitigating guardrail when retrying payments - we can never over-pay by more
2572 // than 10% of the original value. Thus, we want all our retries to be below that. In order to
2573 // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
2574 // out over channel 3+4. This will let us ignore 99% of the payment value and deal with only the small part we actually retry.
2576 let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2577 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0);
2578 let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id;
2579 let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id;
2581 let amt_msat = 100_000_000;
2582 let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
2583 #[cfg(feature = "std")]
2584 let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2585 #[cfg(not(feature = "std"))]
2586 let payment_expiry_secs = 60 * 60;
2587 let mut invoice_features = InvoiceFeatures::empty();
2588 invoice_features.set_variable_length_onion_required();
2589 invoice_features.set_payment_secret_required();
2590 invoice_features.set_basic_mpp_optional();
2591 let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2592 .with_expiry_time(payment_expiry_secs as u64)
2593 .with_features(invoice_features);
2594 let mut route_params = RouteParameters {
2596 final_value_msat: amt_msat,
2597 final_cltv_expiry_delta: TEST_FINAL_CLTV,
2600 let mut route = Route {
2603 pubkey: nodes[1].node.get_our_node_id(),
2604 node_features: nodes[1].node.node_features(),
2605 short_channel_id: chan_1_scid,
2606 channel_features: nodes[1].node.channel_features(),
2608 cltv_expiry_delta: 100,
2610 pubkey: nodes[3].node.get_our_node_id(),
2611 node_features: nodes[2].node.node_features(),
2612 short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown
2613 channel_features: nodes[2].node.channel_features(),
2614 fee_msat: amt_msat / 1000,
2615 cltv_expiry_delta: 100,
2618 pubkey: nodes[2].node.get_our_node_id(),
2619 node_features: nodes[2].node.node_features(),
2620 short_channel_id: chan_3_scid,
2621 channel_features: nodes[2].node.channel_features(),
2623 cltv_expiry_delta: 100,
2625 pubkey: nodes[3].node.get_our_node_id(),
2626 node_features: nodes[3].node.node_features(),
2627 short_channel_id: chan_4_scid,
2628 channel_features: nodes[3].node.channel_features(),
2629 fee_msat: amt_msat - amt_msat / 1000,
2630 cltv_expiry_delta: 100,
2633 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2635 nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2637 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
2638 check_added_monitors!(nodes[0], 2);
2639 let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
2640 assert_eq!(send_msg_events.len(), 2);
2641 send_msg_events.retain(|msg|
2642 if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg {
2643 // Drop the commitment update for nodes[2]; we can just let that one sit pending indefinitely.
2645 *node_id == nodes[1].node.get_our_node_id()
2646 } else { panic!(); }
2649 // From here on out, the retry `RouteParameters` amount will be amt/1000.
2650 route_params.final_value_msat /= 1000;
2653 let end_time = Instant::now() + Duration::from_secs(1);
2654 macro_rules! thread_body { () => { {
2655 // We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
2656 let node_ref = NodePtr::from_node(&nodes[0]);
2658 let node_a = unsafe { &*node_ref.0 };
2659 while Instant::now() < end_time {
2660 node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
2661 // Ignore whether we actually have any pending events; just always pretend we got a
2662 // `PendingHTLCsForwardable`.
2663 node_a.node.process_pending_htlc_forwards();
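// Each iteration pretends a forwardable tick happened and races the retry logic in the other
// threads; the test only passes if those races never produce a duplicate retry HTLC.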
2667 let mut threads = Vec::new();
2668 for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); }
2670 // Back in the main thread, poll pending messages and make sure that we never have more than
2671 // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
2672 // there are HTLC messages shoved in while it's running. This allows us to test that we never
2673 // generate an additional update_add_htlc until we've fully failed the first.
2674 let mut previously_failed_channels = Vec::new();
2676 assert_eq!(send_msg_events.len(), 1);
2677 let send_event = SendEvent::from_event(send_msg_events.pop().unwrap());
2678 assert_eq!(send_event.msgs.len(), 1);
2680 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2681 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
2683 // Note that we only push one route into `expect_find_route` at a time, because that's all
2684 // the retries (should) need. If the bug is reintroduced, "real" routes may be selected, but
2685 // we should still ultimately fail for the same reason - because we're trying to send too
2686 // many HTLCs at once.
2687 let mut new_route_params = route_params.clone();
2688 previously_failed_channels.push(route.paths[0][1].short_channel_id);
2689 new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
2690 route.paths[0][1].short_channel_id += 1;
2691 nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
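// Each round records the just-failed scid and then bumps it, so the next retry's route request
// (and its previously-failed list) stays unique.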
2693 let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2694 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
2695 // The "normal" commitment_signed_dance delivers the final RAA and then calls
2696 // `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
2697 // This races with our other threads which may generate an add-HTLCs commitment update via
2698 // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
2699 // *we've* called `process_pending_htlc_forwards`, when it's guaranteed to have two updates.
2700 let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true);
2701 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &last_raa);
2703 let cur_time = Instant::now();
2704 if cur_time > end_time {
2705 for thread in threads.drain(..) { thread.join().unwrap(); }
2708 // Make sure we have some events to handle when we go around...
2709 nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
2710 nodes[0].node.process_pending_htlc_forwards();
2711 send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
2712 check_added_monitors!(nodes[0], 2);
2714 if cur_time > end_time {