// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests that test the payment retry logic in ChannelManager, including various edge-cases around
//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.

use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::keysinterface::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails};
use crate::ln::features::InvoiceFeatures;
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::outbound_payment::Retry;
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters};
use crate::routing::scoring::ChannelUsage;
use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure};
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::Writeable;

use bitcoin::{Block, BlockHeader, TxMerkleNode};
use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;

use crate::prelude::*;

use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;
#[cfg(feature = "std")]
use {
	crate::util::time::tests::SinceEpoch,
	std::time::{SystemTime, Instant, Duration}
};

#[test]
fn mpp_failure() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
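	// Duplicate the router-provided path and pin each copy to a different first hop so the
	// payment is split into two MPP parts: one via nodes[1] and one via nodes[2], both
	// terminating at nodes[3].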
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_id;
	route.paths[0][1].short_channel_id = chan_3_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_id;
	route.paths[1][1].short_channel_id = chan_4_id;
	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
}

#[test]
fn mpp_retry() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
	let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
	let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2);
	// Rebalance
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	let amt_msat = 1_000_000;
	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	let payment_id = PaymentId(payment_hash.0);
	let mut route_params = RouteParameters {
		payment_params: route.payment_params.clone().unwrap(),
		final_value_msat: amt_msat,
	};

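	// TestRouter::expect_find_route queues the route built above and checks that the
	// RouteParameters the ChannelManager later asks for match route_params. Retry::Attempts(1)
	// allows the ChannelManager to retry the payment once on its own after a path failure.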
	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the success path.
	let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);

	// Add the HTLC along the first hop.
	let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
	let (update_add, commitment_signed) = match fail_path_msgs_1 {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(update_add_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
			(update_add_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};
	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
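	// Complete the commitment_signed/revoke_and_ack exchange so the HTLC is irrevocably
	// committed on the nodes[0] <-> nodes[2] channel before nodes[2] attempts (and fails) to
	// forward it onward.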
	commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);

	// Attempt to forward the payment and complete the 2nd path's failure.
	expect_pending_htlcs_forwardable!(&nodes[2]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[2], 1);
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
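	// With a retry still permitted, the failure surfaces as two events: a PaymentPathFailed for
	// the failed part (the other part is still in flight, hence mpp_parts_remain below) and a
	// PendingHTLCsForwardable which schedules the retry.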
	let mut events = nodes[0].node.get_and_clear_pending_events();
	match events[1] {
		Event::PendingHTLCsForwardable { .. } => {},
		_ => panic!("Unexpected event")
	}
	events.remove(1);
	expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	// Rebalance the channel so the second half of the payment can succeed.
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	// Retry the second half of the payment and make sure it succeeds.
	route.paths.remove(0);
	route_params.final_value_msat = 1_000_000;
	route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
	nodes[0].router.expect_find_route(route_params, Ok(route));
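	// The retry itself is driven by process_pending_htlc_forwards (the PendingHTLCsForwardable
	// event above is what prompts users to call it). The expected RouteParameters now list
	// chan_4 in previously_failed_channels so a real router would steer the retry away from the
	// channel that just failed.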
	nodes[0].node.process_pending_htlc_forwards();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}

fn do_mpp_receive_timeout(send_partial_mpp: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
	let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
	let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3);

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the first path.
	let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);

	if send_partial_mpp {
		// Time out the partial MPP
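		// Each tick below simulates a call to ChannelManager::timer_tick_occurred (expected
		// roughly once a minute in production). After MPP_TIMEOUT_TICKS of them the recipient
		// gives up waiting for the missing part and fails back the part it did receive.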
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		// Failed HTLC from node 3 -> 1
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
		let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
		nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
		check_added_monitors!(nodes[3], 1);
		commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);

		// Failed HTLC from node 1 -> 0
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
		let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
		check_added_monitors!(nodes[1], 1);
		commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);

		expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
	} else {
		// Pass half of the payment along the second path.
		let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);

		// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
	}
}

#[test]
fn mpp_receive_timeout() {
	do_mpp_receive_timeout(true);
	do_mpp_receive_timeout(false);
}

#[test]
fn no_pending_leak_on_initial_send_failure() {
	// In an earlier version of our payment tracking, we'd have a retry entry even when the initial
	// HTLC for a payment failed to send due to local channel errors (e.g. peer disconnected). In
	// this case, the user wouldn't have a PaymentId to retry the payment with, but we'd think we
	// have a pending payment forever and never time it out.
	// Here we test exactly that - sending a payment while the peer is disconnected so the initial
	// attempt fails, and then checking that no pending payment is left being tracked.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
		true, APIError::ChannelUnavailable { ref err },
		assert_eq!(err, "Peer for first hop currently disconnected"));

	assert!(!nodes[0].node.has_pending_payments());
}

fn do_retry_with_no_persist(confirm_before_reload: bool) {
	// If we send a pending payment and `send_payment` returns success, we should always either
	// return a payment failure event or a payment success event, and on failure the payment should
	// be retryable.
	//
	// In order to do so when the ChannelManager isn't immediately persisted (which is normal - it's
	// always persisted asynchronously), the ChannelManager has to reload some payment data from
	// ChannelMonitor(s) in some cases. This tests that reloading.
	//
	// `confirm_before_reload` confirms the channel-closing commitment transaction on-chain prior
	// to reloading the ChannelManager, increasing test coverage in ChannelMonitor HTLC tracking
	// which has separate codepaths for "commitment transaction already confirmed" and not.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Serialize the ChannelManager prior to sending payments
	let nodes_0_serialized = nodes[0].node.encode();

	// Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
	// out and retry.
	let amt_msat = 1_000_000;
	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
	let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
	let route_params = RouteParameters {
		payment_params: route.payment_params.clone().unwrap(),
		final_value_msat: amt_msat,
	};
	nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	// We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
	// which would prevent retry.
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
	let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
	if confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	}

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	nodes[0].node.timer_tick_occurred();
	if !confirm_before_reload {
		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		assert_eq!(as_broadcasted_txn.len(), 1);
		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
	} else {
		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	}
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
			check_added_monitors!(nodes[1], 1);
			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
	// we close in a moment.
	nodes[2].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);

	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
	check_added_monitors!(nodes[1], 1);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);

	if confirm_before_reload {
		let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
		nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	create_announced_chan_between_nodes(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	mine_transaction(&nodes[1], &as_commitment_tx);
	let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(bs_htlc_claim_txn.len(), 1);
	check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);

	if !confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
	}
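	// Mining nodes[1]'s HTLC-claim transaction hands nodes[0]'s ChannelMonitor the payment
	// preimage, which is what lets the reloaded ChannelManager generate PaymentSent for the
	// first (already-claimed) payment.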
	mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
	expect_payment_sent!(nodes[0], payment_preimage_1);
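	// Connect enough blocks for the remaining HTLC to expire. nodes[0] then broadcasts
	// HTLC-Timeout transactions for both HTLC outputs on its commitment, and we confirm
	// whichever one doesn't conflict with nodes[1]'s claim, failing the second payment.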
	connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
	let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(as_htlc_timeout_txn.len(), 2);
	let (first_htlc_timeout_tx, second_htlc_timeout_tx) = (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1]);
	check_spends!(first_htlc_timeout_tx, as_commitment_tx);
	check_spends!(second_htlc_timeout_tx, as_commitment_tx);
	if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
		confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
	} else {
		confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
	}
	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());

	// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
	// reloaded) via a route over the new channel, which should work without issue and eventually be
	// received and claimed at the recipient just like any other payment.
	let (mut new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);

	// Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
	// and not the original fee. We also update nodes[1]'s relevant config as
	// do_claim_payment_along_route expects us to never overpay.
	{
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
			.unwrap().lock().unwrap();
		let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
		let mut new_config = channel.config();
		new_config.forwarding_fee_base_msat += 100_000;
		channel.update_config(&new_config);
		new_route.paths[0][0].fee_msat += 100_000;
	}

	// Force expiration of the channel's previous config.
	for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
		nodes[1].node.timer_tick_occurred();
	}

	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
	nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
	expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0][0].fee_msat));
}

#[test]
fn retry_with_no_persist() {
	do_retry_with_no_persist(true);
	do_retry_with_no_persist(false);
}

fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
	// Test that an off-chain completed payment is not retryable on restart. This was previously
	// broken for dust payments, but we test for both dust and non-dust payments.
	//
	// `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
	// output at all.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

	let mut manually_accept_config = test_default_channel_config();
	manually_accept_config.manually_accept_inbound_channels = true;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]);

	let first_persister: test_utils::TestPersister;
	let first_new_chain_monitor: test_utils::TestChainMonitor;
	let first_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let second_persister: test_utils::TestPersister;
	let second_new_chain_monitor: test_utils::TestChainMonitor;
	let second_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let third_persister: test_utils::TestPersister;
	let third_new_chain_monitor: test_utils::TestChainMonitor;
	let third_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Because we set nodes[1] to manually accept channels, just open a 0-conf channel.
	let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	confirm_transaction(&nodes[0], &funding_tx);
	confirm_transaction(&nodes[1], &funding_tx);
	// Ignore the announcement_signatures messages
	nodes[0].node.get_and_clear_pending_msg_events();
	nodes[1].node.get_and_clear_pending_msg_events();
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;

	// Serialize the ChannelManager prior to sending payments
	let mut nodes_0_serialized = nodes[0].node.encode();

	let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0;
	let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 });

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();

	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
	nodes[0].node.timer_tick_occurred();
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	let bs_commitment_tx;
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id()) });
			check_added_monitors!(nodes[1], 1);
			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
	// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
	// incoming HTLCs with the same payment hash later.
	nodes[2].node.fail_htlc_backwards(&payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[2], 1);

	let htlc_fail_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fail_updates.commitment_signed, false);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
		[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);

	// Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
	// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
	// after the commitment transaction, so always connect the commitment transaction.
	mine_transaction(&nodes[0], &bs_commitment_tx[0]);
	mine_transaction(&nodes[1], &bs_commitment_tx[0]);
	if !use_dust {
		connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
		assert_eq!(as_htlc_timeout.len(), 1);

		mine_transaction(&nodes[0], &as_htlc_timeout[0]);
		// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
		mine_transaction(&nodes[1], &as_htlc_timeout[0]);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	// We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
	let (_, chan_id_3) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
	// confirming, we will fail as it's considered still-pending...
	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
		Err(PaymentSendFailure::DuplicatePayment) => {},
		_ => panic!("Unexpected error")
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
	// again. We serialize the node first as we'll then test retrying the HTLC after a restart
	// (which should also still work).
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());

	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
	nodes_0_serialized = nodes[0].node.encode();

	// After the payment failed, we're free to send it again.
	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
	// the payment is not (spuriously) listed as still pending.
	assert!(nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id).is_ok());
	check_added_monitors!(nodes[0], 1);
	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);

	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
		Err(PaymentSendFailure::DuplicatePayment) => {},
		_ => panic!("Unexpected error")
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
	nodes_0_serialized = nodes[0].node.encode();

	// Check that after reload we can send the payment again (though we shouldn't, since it was
	// claimed previously).
	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	match nodes[0].node.send_payment(&new_route, payment_hash, &Some(payment_secret), payment_id) {
		Err(PaymentSendFailure::DuplicatePayment) => {},
		_ => panic!("Unexpected error")
	}
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}

#[test]
fn test_completed_payment_not_retryable_on_reload() {
	do_test_completed_payment_not_retryable_on_reload(true);
	do_test_completed_payment_not_retryable_on_reload(false);
}


fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
	// the ChannelMonitor tells it to.
	//
	// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
	// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
	// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
	// nodes[0].
	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
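	// We expect our commitment transaction (which the test broadcaster records twice) plus an
	// HTLC-Timeout transaction spending it.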
	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(node_txn.len(), 3);
	assert_eq!(node_txn[0], node_txn[1]);
	check_spends!(node_txn[1], funding_tx);
	check_spends!(node_txn[2], node_txn[1]);
	let timeout_txn = vec![node_txn[2].clone()];

	nodes[1].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);

	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
	connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
	let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(claim_txn.len(), 1);
	check_spends!(claim_txn[0], node_txn[1]);

	header.prev_blockhash = nodes[0].best_block_hash();
	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});

	if confirm_commitment_tx {
		connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
	}

	header.prev_blockhash = nodes[0].best_block_hash();
	let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };

	if payment_timeout {
		assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
		connect_block(&nodes[0], &claim_block);
		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
	}

	// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
	// returning InProgress. This should cause the claim event to never make its way to the
	// ChannelManager.
	chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	if payment_timeout {
		connect_blocks(&nodes[0], 1);
	} else {
		connect_block(&nodes[0], &claim_block);
	}

	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
		.get_mut(&funding_txo).unwrap().drain().collect();
	// If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
	// If we're testing connection idempotency we may get substantially more.
	assert!(mon_updates.len() >= 1);
	assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

	// If we persist the ChannelManager here, we should get the PaymentSent event after
	// deserialization.
	let mut chan_manager_serialized = Vec::new();
	if !persist_manager_post_event {
		chan_manager_serialized = nodes[0].node.encode();
	}

	// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
	// payment sent event.
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	for update in mon_updates {
		nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
	}
	if payment_timeout {
		expect_payment_failed!(nodes[0], payment_hash, false);
	} else {
		expect_payment_sent!(nodes[0], payment_preimage);
	}

	// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
	// twice.
	if persist_manager_post_event {
		chan_manager_serialized = nodes[0].node.encode();
	}

	// Now reload nodes[0]...
	reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

	if persist_manager_post_event {
		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	} else if payment_timeout {
		expect_payment_failed!(nodes[0], payment_hash, false);
	} else {
		expect_payment_sent!(nodes[0], payment_preimage);
	}

	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
	// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
	// payment events should kick in, leaving us with no pending events here.
	let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
	nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
}

#[test]
fn test_dup_htlc_onchain_fails_on_reload() {
	do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
	do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
	do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
	do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
	do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
	do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
}

#[test]
fn test_fulfill_restart_failure() {
	// When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully
	// fulfilled. At this point, the peer can reconnect and decide to either fulfill the HTLC
	// again, or fail it, giving us free money.
	//
	// Of course probably they won't fail it and give us free money, but because we have code to
	// handle it, we should test the logic for it anyway. We do that here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	// The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
	// pre-fulfill, which we do by serializing it here.
	let chan_manager_serialized = nodes[1].node.encode();
	let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode();

	nodes[1].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash, 100_000);

	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
	expect_payment_sent_without_paths!(nodes[0], payment_preimage);

	// Now reload nodes[1]...
	reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	nodes[1].node.fail_htlc_backwards(&payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[1], 1);
	let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false);
	// nodes[0] shouldn't generate any events here: while it just got a payment failure completion,
	// it had already considered the payment fulfilled, and now it has effectively been handed free money.
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
}

#[test]
fn get_ldk_payment_preimage() {
	// Ensure that `ChannelManager::get_payment_preimage` can successfully be used to claim a payment.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let amt_msat = 60_000;
	let expiry_secs = 60 * 60;
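	// create_inbound_payment has the recipient derive the preimage itself rather than taking one
	// from the caller, which is what allows nodes[1] to recover it below via get_payment_preimage.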
	let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap();

	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
		.with_features(nodes[1].node.invoice_features());
	let scorer = test_utils::TestScorer::new();
	let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
	let random_seed_bytes = keys_manager.get_secure_random_bytes();
	let route = get_route(
		&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
		Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
		amt_msat, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	// Make sure to use `get_payment_preimage`
	let payment_preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
	claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
}

#[test]
fn sent_probe_is_probe_of_sending_node() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
872         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
873         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
874
875         create_announced_chan_between_nodes(&nodes, 0, 1);
876         create_announced_chan_between_nodes(&nodes, 1, 2);
877
878         // First check we refuse to build a single-hop probe
879         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
880         assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err());
881
882         // Then build an actual two-hop probing path
883         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
884
885         match nodes[0].node.send_probe(route.paths[0].clone()) {
886                 Ok((payment_hash, payment_id)) => {
887                         assert!(nodes[0].node.payment_is_probe(&payment_hash, &payment_id));
888                         assert!(!nodes[1].node.payment_is_probe(&payment_hash, &payment_id));
889                         assert!(!nodes[2].node.payment_is_probe(&payment_hash, &payment_id));
890                 },
891                 _ => panic!(),
892         }
893
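        // Consume the probe's outbound HTLC update and monitor addition so the node teardown checks
        // don't complain about leftover state.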
894         get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
895         check_added_monitors!(nodes[0], 1);
896 }
897
898 #[test]
899 fn successful_probe_yields_event() {
900         let chanmon_cfgs = create_chanmon_cfgs(3);
901         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
902         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
903         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
904
905         create_announced_chan_between_nodes(&nodes, 0, 1);
906         create_announced_chan_between_nodes(&nodes, 1, 2);
907
908         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
909
910         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
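        // The probe uses a payment hash whose preimage the recipient doesn't know, so nodes[2] will
        // fail the HTLC back; a failure coming from the final hop is what yields ProbeSuccessful.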
911
912         // node[0] -- update_add_htlcs -> node[1]
913         check_added_monitors!(nodes[0], 1);
914         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
915         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
916         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
917         check_added_monitors!(nodes[1], 0);
918         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
919         expect_pending_htlcs_forwardable!(nodes[1]);
920
921         // node[1] -- update_add_htlcs -> node[2]
922         check_added_monitors!(nodes[1], 1);
923         let updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
924         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
925         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &probe_event.msgs[0]);
926         check_added_monitors!(nodes[2], 0);
927         commitment_signed_dance!(nodes[2], nodes[1], probe_event.commitment_msg, true, true);
928
929         // node[1] <- update_fail_htlcs -- node[2]
930         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
931         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
932         check_added_monitors!(nodes[1], 0);
933         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, true);
934
935         // node[0] <- update_fail_htlcs -- node[1]
936         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
937         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
938         check_added_monitors!(nodes[0], 0);
939         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
940
941         let mut events = nodes[0].node.get_and_clear_pending_events();
942         assert_eq!(events.len(), 1);
943         match events.drain(..).next().unwrap() {
944                 crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
945                         assert_eq!(payment_id, ev_pid);
946                         assert_eq!(payment_hash, ev_ph);
947                 },
948                 _ => panic!(),
949         };
950         assert!(!nodes[0].node.has_pending_payments());
951 }
952
953 #[test]
954 fn failed_probe_yields_event() {
955         let chanmon_cfgs = create_chanmon_cfgs(3);
956         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
957         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
958         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
959
960         create_announced_chan_between_nodes(&nodes, 0, 1);
961         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000);
962
963         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
964
965         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
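        // The probe amount exceeds nodes[1]'s spendable balance on the second hop (most of that
        // channel's value was pushed to nodes[2]), so nodes[1] should fail the HTLC back rather than forward it.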
966
967         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
968
969         // node[0] -- update_add_htlcs -> node[1]
970         check_added_monitors!(nodes[0], 1);
971         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
972         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
973         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
974         check_added_monitors!(nodes[1], 0);
975         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
976         expect_pending_htlcs_forwardable!(nodes[1]);
977
978         // node[0] <- update_fail_htlcs -- node[1]
979         check_added_monitors!(nodes[1], 1);
980         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
981         // Skip the PendingHTLCsForwardable event
982         let _events = nodes[1].node.get_and_clear_pending_events();
983         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
984         check_added_monitors!(nodes[0], 0);
985         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
986
987         let mut events = nodes[0].node.get_and_clear_pending_events();
988         assert_eq!(events.len(), 1);
989         match events.drain(..).next().unwrap() {
990                 crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
991                         assert_eq!(payment_id, ev_pid);
992                         assert_eq!(payment_hash, ev_ph);
993                 },
994                 _ => panic!(),
995         };
996         assert!(!nodes[0].node.has_pending_payments());
997 }
998
999 #[test]
1000 fn onchain_failed_probe_yields_event() {
1001         // Tests that an attempt to probe over a channel that is eventually closed results in a failure
1002         // event.
1003         let chanmon_cfgs = create_chanmon_cfgs(3);
1004         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1005         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1006         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1007
1008         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1009         create_announced_chan_between_nodes(&nodes, 1, 2);
1010
1011         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42);
1012
1013         // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
1014         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000, 42);
1015         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
1016
1017         // node[0] -- update_add_htlcs -> node[1]
1018         check_added_monitors!(nodes[0], 1);
1019         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1020         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
1021         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
1022         check_added_monitors!(nodes[1], 0);
1023         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
1024         expect_pending_htlcs_forwardable!(nodes[1]);
1025
1026         check_added_monitors!(nodes[1], 1);
1027         let _ = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1028
1029         // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
1030         // Node A, which after 6 confirmations should result in a probe failure event.
1031         let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
1032         confirm_transaction(&nodes[0], &bs_txn[0]);
1033         check_closed_broadcast!(&nodes[0], true);
1034         check_added_monitors!(nodes[0], 1);
1035
1036         let mut events = nodes[0].node.get_and_clear_pending_events();
1037         assert_eq!(events.len(), 2);
1038         let mut found_probe_failed = false;
1039         for event in events.drain(..) {
1040                 match event {
1041                         Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
1042                                 assert_eq!(payment_id, ev_pid);
1043                                 assert_eq!(payment_hash, ev_ph);
1044                                 found_probe_failed = true;
1045                         },
1046                         Event::ChannelClosed { .. } => {},
1047                         _ => panic!(),
1048                 }
1049         }
1050         assert!(found_probe_failed);
1051         assert!(!nodes[0].node.has_pending_payments());
1052 }
1053
1054 #[test]
1055 fn claimed_send_payment_idempotent() {
1056         // Tests that `send_payment` (and friends) are (reasonably) idempotent.
1057         let chanmon_cfgs = create_chanmon_cfgs(2);
1058         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1059         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1060         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1061
1062         create_announced_chan_between_nodes(&nodes, 0, 1).2;
1063
1064         let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1065         let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1066
1067         macro_rules! check_send_rejected {
1068                 () => {
1069                         // If we try to resend a new payment with a different payment_hash but with the same
1070                         // payment_id, it should be rejected.
1071                         let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1072                         match send_result {
1073                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1074                                 _ => panic!("Unexpected send result: {:?}", send_result),
1075                         }
1076
1077                         // Further, if we try to send a spontaneous payment with the same payment_id it should
1078                         // also be rejected.
1079                         let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1080                         match send_result {
1081                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1082                                 _ => panic!("Unexpected send result: {:?}", send_result),
1083                         }
1084                 }
1085         }
1086
1087         check_send_rejected!();
1088
1089         // Claim the payment backwards, but note that the PaymentSent event is still pending and has
1090         // not been seen by the user. At this point, from the user perspective nothing has changed, so
1091         // we must remain just as idempotent as we were before.
1092         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, first_payment_preimage);
1093
1094         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1095                 nodes[0].node.timer_tick_occurred();
1096         }
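        // Timer ticks alone don't free the PaymentId while the PaymentSent event is still pending,
        // so duplicate sends must keep being rejected.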
1097
1098         check_send_rejected!();
1099
1100         // Once the user sees and handles the `PaymentSent` event, we expect them to no longer call
1101         // `send_payment`, and our idempotency guarantees are off - they should have atomically marked
1102         // the payment complete. However, they could have called `send_payment` while the event was
1103         // being processed, leading to a race in our idempotency guarantees. Thus, even immediately
1104         // after the event is handled a duplicate payment should still be rejected.
1105         expect_payment_sent!(&nodes[0], first_payment_preimage, Some(0));
1106         check_send_rejected!();
1107
1108         // If relatively little time has passed, a duplicate payment should still fail.
1109         nodes[0].node.timer_tick_occurred();
1110         check_send_rejected!();
1111
1112         // However, after some time has passed (at least more than the one timer tick above), a
1113         // duplicate payment should go through, as ChannelManager should no longer have any remaining
1114         // references to the old payment data.
1115         for _ in 0..IDEMPOTENCY_TIMEOUT_TICKS {
1116                 nodes[0].node.timer_tick_occurred();
1117         }
1118
1119         nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1120         check_added_monitors!(nodes[0], 1);
1121         pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1122         claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1123 }
1124
1125 #[test]
1126 fn abandoned_send_payment_idempotent() {
1127         // Tests that `send_payment` (and friends) allow duplicate PaymentIds immediately after
1128         // abandon_payment.
1129         let chanmon_cfgs = create_chanmon_cfgs(2);
1130         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1131         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1132         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1133
1134         create_announced_chan_between_nodes(&nodes, 0, 1).2;
1135
1136         let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1137         let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1138
1139         macro_rules! check_send_rejected {
1140                 () => {
1141                         // If we try to resend a new payment with a different payment_hash but with the same
1142                         // payment_id, it should be rejected.
1143                         let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1144                         match send_result {
1145                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1146                                 _ => panic!("Unexpected send result: {:?}", send_result),
1147                         }
1148
1149                         // Further, if we try to send a spontaneous payment with the same payment_id it should
1150                         // also be rejected.
1151                         let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1152                         match send_result {
1153                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1154                                 _ => panic!("Unexpected send result: {:?}", send_result),
1155                         }
1156                 }
1157         }
1158
1159         check_send_rejected!();
1160
1161         nodes[1].node.fail_htlc_backwards(&first_payment_hash);
1162         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
1163
1164         // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the
1165         // PaymentId.
1166         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1167                 nodes[0].node.timer_tick_occurred();
1168         }
1169         check_send_rejected!();
1170
1171         pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
1172
1173         // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
1174         // failed payment back.
1175         nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1176         check_added_monitors!(nodes[0], 1);
1177         pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1178         claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1179 }
1180
1181 #[derive(PartialEq)]
1182 enum InterceptTest {
1183         Forward,
1184         Fail,
1185         Timeout,
1186 }
1187
1188 #[test]
1189 fn test_trivial_inflight_htlc_tracking(){
1190         // This test covers three scenarios:
1191         // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs
1192         // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs
1193         // (3) After we claim the payment sent in (2), InFlightHtlcs should return `None` for the query.
1194         let chanmon_cfgs = create_chanmon_cfgs(3);
1195         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1196         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1197         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1198
1199         let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
1200         let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
1201
1202         // Send and claim the payment. Inflight HTLCs should be empty.
1203         let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
1204         let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
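        // `compute_inflight_htlcs` snapshots the HTLCs currently in flight over this node's channels;
        // used liquidity is then looked up per directed hop (source node id, target node id, short channel id).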
1205         {
1206                 let mut node_0_per_peer_lock;
1207                 let mut node_0_peer_state_lock;
1208                 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1209
1210                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1211                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1212                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1213                         channel_1.get_short_channel_id().unwrap()
1214                 );
1215                 assert_eq!(chan_1_used_liquidity, None);
1216         }
1217         {
1218                 let mut node_1_per_peer_lock;
1219                 let mut node_1_peer_state_lock;
1220                 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1221
1222                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1223                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1224                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1225                         channel_2.get_short_channel_id().unwrap()
1226                 );
1227
1228                 assert_eq!(chan_2_used_liquidity, None);
1229         }
1230         let pending_payments = nodes[0].node.list_recent_payments();
1231         assert_eq!(pending_payments.len(), 1);
1232         assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
1233
1234         // Remove fulfilled payment
1235         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1236                 nodes[0].node.timer_tick_occurred();
1237         }
1238
1239         // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
1240         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
1241         let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1242         {
1243                 let mut node_0_per_peer_lock;
1244                 let mut node_0_peer_state_lock;
1245                 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1246
1247                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1248                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1249                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1250                         channel_1.get_short_channel_id().unwrap()
1251                 );
1252                 // First hop accounts for expected 1000 msat fee
1253                 assert_eq!(chan_1_used_liquidity, Some(501000));
1254         }
1255         {
1256                 let mut node_1_per_peer_lock;
1257                 let mut node_1_peer_state_lock;
1258                 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1259
1260                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1261                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1262                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1263                         channel_2.get_short_channel_id().unwrap()
1264                 );
1265
1266                 assert_eq!(chan_2_used_liquidity, Some(500000));
1267         }
1268         let pending_payments = nodes[0].node.list_recent_payments();
1269         assert_eq!(pending_payments.len(), 1);
1270         assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
1271
1272         // Now, let's claim the payment. The used-liquidity queries should then return `None` again.
1273         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1274
1275         // Remove fulfilled payment
1276         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1277                 nodes[0].node.timer_tick_occurred();
1278         }
1279
1280         let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1281         {
1282                 let mut node_0_per_peer_lock;
1283                 let mut node_0_peer_state_lock;
1284                 let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id);
1285
1286                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1287                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1288                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1289                         channel_1.get_short_channel_id().unwrap()
1290                 );
1291                 assert_eq!(chan_1_used_liquidity, None);
1292         }
1293         {
1294                 let mut node_1_per_peer_lock;
1295                 let mut node_1_peer_state_lock;
1296                 let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id);
1297
1298                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1299                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1300                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1301                         channel_2.get_short_channel_id().unwrap()
1302                 );
1303                 assert_eq!(chan_2_used_liquidity, None);
1304         }
1305
1306         let pending_payments = nodes[0].node.list_recent_payments();
1307         assert_eq!(pending_payments.len(), 0);
1308 }
1309
1310 #[test]
1311 fn test_holding_cell_inflight_htlcs() {
1312         let chanmon_cfgs = create_chanmon_cfgs(2);
1313         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1314         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1315         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1316         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1317
1318         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1319         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1320
1321         // Queue up two payments - one will be delivered right away, one immediately goes into the
1322         // holding cell as nodes[0] is AwaitingRAA.
1323         {
1324                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1325                 check_added_monitors!(nodes[0], 1);
1326                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1327                 check_added_monitors!(nodes[0], 0);
1328         }
1329
1330         let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1331
1332         {
1333                 let mut node_0_per_peer_lock;
1334                 let mut node_0_peer_state_lock;
1335                 let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id);
1336
1337                 let used_liquidity = inflight_htlcs.used_liquidity_msat(
1338                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1339                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1340                         channel.get_short_channel_id().unwrap()
1341                 );
1342
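                // Both the HTLC already added to the channel and the one still sitting in the holding
                // cell should be counted, hence 2 * 1_000_000 msat.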
1343                 assert_eq!(used_liquidity, Some(2000000));
1344         }
1345
1346         // Clear pending message events so the test doesn't fail with a "Had excess message on node..." error
1347         nodes[0].node.get_and_clear_pending_msg_events();
1348 }
1349
1350 #[test]
1351 fn intercepted_payment() {
1352         // Test that detecting an intercept scid on payment forward will signal LDK to generate an
1353         // intercept event, which the LSP can then use to either (a) open a JIT channel to forward the
1354         // payment or (b) fail the payment.
1355         do_test_intercepted_payment(InterceptTest::Forward);
1356         do_test_intercepted_payment(InterceptTest::Fail);
1357         // Make sure that intercepted payments will be automatically failed back if too many blocks pass.
1358         do_test_intercepted_payment(InterceptTest::Timeout);
1359 }
1360
1361 fn do_test_intercepted_payment(test: InterceptTest) {
1362         let chanmon_cfgs = create_chanmon_cfgs(3);
1363         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1364
1365         let mut zero_conf_chan_config = test_default_channel_config();
1366         zero_conf_chan_config.manually_accept_inbound_channels = true;
1367         let mut intercept_forwards_config = test_default_channel_config();
1368         intercept_forwards_config.accept_intercept_htlcs = true;
1369         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]);
1370
1371         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1372         let scorer = test_utils::TestScorer::new();
1373         let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
1374
1375         let _ = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1376
1377         let amt_msat = 100_000;
1378         let intercept_scid = nodes[1].node.get_intercept_scid();
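        // `get_intercept_scid` returns a fake short channel id; routing over it via the hint below
        // makes nodes[1] (which set `accept_intercept_htlcs`) surface an HTLCIntercepted event
        // instead of forwarding the HTLC.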
1379         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1380                 .with_route_hints(vec![
1381                         RouteHint(vec![RouteHintHop {
1382                                 src_node_id: nodes[1].node.get_our_node_id(),
1383                                 short_channel_id: intercept_scid,
1384                                 fees: RoutingFees {
1385                                         base_msat: 1000,
1386                                         proportional_millionths: 0,
1387                                 },
1388                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
1389                                 htlc_minimum_msat: None,
1390                                 htlc_maximum_msat: None,
1391                         }])
1392                 ])
1393                 .with_features(nodes[2].node.invoice_features());
1394         let route_params = RouteParameters {
1395                 payment_params,
1396                 final_value_msat: amt_msat,
1397         };
1398         let route = get_route(
1399                 &nodes[0].node.get_our_node_id(), &route_params.payment_params,
1400                 &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
1401                 route_params.payment_params.final_cltv_expiry_delta, nodes[0].logger, &scorer,
1402                 &random_seed_bytes,
1403         ).unwrap();
1404
1405         let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
1406         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
1407         let payment_event = {
1408                 {
1409                         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
1410                         assert_eq!(added_monitors.len(), 1);
1411                         added_monitors.clear();
1412                 }
1413                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1414                 assert_eq!(events.len(), 1);
1415                 SendEvent::from_event(events.remove(0))
1416         };
1417         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1418         commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
1419
1420         // Check that we generate the HTLCIntercepted event when an intercept forward is detected.
1421         let events = nodes[1].node.get_and_clear_pending_events();
1422         assert_eq!(events.len(), 1);
1423         let (intercept_id, expected_outbound_amount_msat) = match events[0] {
1424                 crate::util::events::Event::HTLCIntercepted {
1425                         intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
1426                 } => {
1427                         assert_eq!(pmt_hash, payment_hash);
1428                         assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees());
1429                         assert_eq!(short_channel_id, intercept_scid);
1430                         (intercept_id, expected_outbound_amount_msat)
1431                 },
1432                 _ => panic!()
1433         };
1434
1435         // Check for unknown channel id error.
1436         let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1437         assert_eq!(unknown_chan_id_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
1438
1439         if test == InterceptTest::Fail {
1440                 // Ensure we can fail the intercepted payment back.
1441                 nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
1442                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
1443                 nodes[1].node.process_pending_htlc_forwards();
1444                 let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1445                 check_added_monitors!(&nodes[1], 1);
1446                 assert!(update_fail.update_fail_htlcs.len() == 1);
1447                 let fail_msg = update_fail.update_fail_htlcs[0].clone();
1448                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
1449                 commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
1450
1451                 // Ensure the payment fails with the expected error.
1452                 let fail_conditions = PaymentFailedConditions::new()
1453                         .blamed_scid(intercept_scid)
1454                         .blamed_chan_closed(true)
1455                         .expected_htlc_error_data(0x4000 | 10, &[]);
1456                 expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
1457         } else if test == InterceptTest::Forward {
1458                 // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet.
1459                 let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
1460                 let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1461                 assert_eq!(unusable_chan_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(temp_chan_id)) });
1462                 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
1463
1464                 // Open the just-in-time channel so the payment can then be forwarded.
1465                 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1466
1467                 // Finally, forward the intercepted payment through and claim it.
1468                 nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap();
1469                 expect_pending_htlcs_forwardable!(nodes[1]);
1470
1471                 let payment_event = {
1472                         {
1473                                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1474                                 assert_eq!(added_monitors.len(), 1);
1475                                 added_monitors.clear();
1476                         }
1477                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1478                         assert_eq!(events.len(), 1);
1479                         SendEvent::from_event(events.remove(0))
1480                 };
1481                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
1482                 commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true);
1483                 expect_pending_htlcs_forwardable!(nodes[2]);
1484
1485                 let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
1486                 expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
1487                 do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
1488                 let events = nodes[0].node.get_and_clear_pending_events();
1489                 assert_eq!(events.len(), 2);
1490                 match events[0] {
1491                         Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
1492                                 assert_eq!(payment_preimage, *ev_preimage);
1493                                 assert_eq!(payment_hash, *ev_hash);
1494                                 assert_eq!(fee_paid_msat, &Some(1000));
1495                         },
1496                         _ => panic!("Unexpected event")
1497                 }
1498                 match events[1] {
1499                         Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
1500                                 assert_eq!(hash, Some(payment_hash));
1501                         },
1502                         _ => panic!("Unexpected event")
1503                 }
1504         } else if test == InterceptTest::Timeout {
1505                 let mut block = Block {
1506                         header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
1507                         txdata: vec![],
1508                 };
1509                 connect_block(&nodes[0], &block);
1510                 connect_block(&nodes[1], &block);
1511                 for _ in 0..TEST_FINAL_CLTV {
1512                         block.header.prev_blockhash = block.block_hash();
1513                         connect_block(&nodes[0], &block);
1514                         connect_block(&nodes[1], &block);
1515                 }
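                // Enough blocks have passed that the intercepted HTLC can no longer be forwarded in
                // time, so nodes[1] automatically fails it back.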
1516                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::InvalidForward { requested_forward_scid: intercept_scid }]);
1517                 check_added_monitors!(nodes[1], 1);
1518                 let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1519                 assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
1520                 assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
1521                 assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
1522                 assert!(htlc_timeout_updates.update_fee.is_none());
1523
1524                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
1525                 commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
1526                 expect_payment_failed!(nodes[0], payment_hash, false, 0x2000 | 2, []);
1527
1528                 // Check for unknown intercept id error.
1529                 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1530                 let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1531                 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1532                 let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err();
1533                 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1534         }
1535 }
1536
1537 #[derive(PartialEq)]
1538 enum AutoRetry {
1539         Success,
1540         Spontaneous,
1541         FailAttempts,
1542         FailTimeout,
1543         FailOnRestart,
1544         FailOnRetry,
1545 }
1546
1547 #[test]
1548 fn automatic_retries() {
1549         do_automatic_retries(AutoRetry::Success);
1550         do_automatic_retries(AutoRetry::Spontaneous);
1551         do_automatic_retries(AutoRetry::FailAttempts);
1552         do_automatic_retries(AutoRetry::FailTimeout);
1553         do_automatic_retries(AutoRetry::FailOnRestart);
1554         do_automatic_retries(AutoRetry::FailOnRetry);
1555 }
1556 fn do_automatic_retries(test: AutoRetry) {
1557         // Test basic automatic payment retries in ChannelManager. See individual `test` variant comments
1558         // below.
1559         let chanmon_cfgs = create_chanmon_cfgs(3);
1560         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1561         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1562
1563         let persister;
1564         let new_chain_monitor;
1565         let node_0_deserialized;
1566
1567         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1568         let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1569         let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
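        // Note the second channel is opened from nodes[2] towards nodes[1], leaving nodes[1] with
        // essentially no outbound liquidity on that hop; the initial payment attempt will fail there.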
1570
1571         // Marshall data to send the payment
1572         #[cfg(feature = "std")]
1573         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1574         #[cfg(not(feature = "std"))]
1575         let payment_expiry_secs = 60 * 60;
1576         let amt_msat = 1000;
1577         let mut invoice_features = InvoiceFeatures::empty();
1578         invoice_features.set_variable_length_onion_required();
1579         invoice_features.set_payment_secret_required();
1580         invoice_features.set_basic_mpp_optional();
1581         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
1582                 .with_expiry_time(payment_expiry_secs as u64)
1583                 .with_features(invoice_features);
1584         let route_params = RouteParameters {
1585                 payment_params,
1586                 final_value_msat: amt_msat,
1587         };
1588         let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
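        // Only the hash, preimage and secret from this call are used; the retrying sender builds its
        // own routes from `route_params`.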
1589
1590         macro_rules! pass_failed_attempt_with_retry_along_path {
1591                 ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => {
1592                         // Send a payment attempt that fails due to lack of liquidity on the second hop
1593                         check_added_monitors!(nodes[0], 1);
1594                         let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1595                         let mut update_add = update_0.update_add_htlcs[0].clone();
1596                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
1597                         commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
1598                         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1599                         nodes[1].node.process_pending_htlc_forwards();
1600                         expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
1601                                 vec![HTLCDestination::NextHopChannel {
1602                                         node_id: Some(nodes[2].node.get_our_node_id()),
1603                                         channel_id: $failing_channel_id,
1604                                 }]);
1605                         nodes[1].node.process_pending_htlc_forwards();
1606                         let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1607                         check_added_monitors!(&nodes[1], 1);
1608                         assert!(update_1.update_fail_htlcs.len() == 1);
1609                         let fail_msg = update_1.update_fail_htlcs[0].clone();
1610                         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
1611                         commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false);
1612
1613                         // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry
1614                         let mut events = nodes[0].node.get_and_clear_pending_events();
1615                         assert_eq!(events.len(), 2);
1616                         match events[0] {
1617                                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
1618                                         assert_eq!(payment_hash, ev_payment_hash);
1619                                         assert_eq!(payment_failed_permanently, false);
1620                                 },
1621                                 _ => panic!("Unexpected event"),
1622                         }
1623                         if $expect_pending_htlcs_forwardable {
1624                                 match events[1] {
1625                                         Event::PendingHTLCsForwardable { .. } => {},
1626                                         _ => panic!("Unexpected event"),
1627                                 }
1628                         } else {
1629                                 match events[1] {
1630                                         Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => {
1631                                                 assert_eq!(payment_hash, ev_payment_hash);
1632                                         },
1633                                         _ => panic!("Unexpected event"),
1634                                 }
1635                         }
1636                 }
1637         }
1638
1639         if test == AutoRetry::Success {
1640                 // Test that we can succeed on the first retry.
1641                 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1642                 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1643
1644                 // Open a new channel with liquidity on the second hop so we can find a route for the retry
1645                 // attempt, since the initial second hop channel will be excluded from pathfinding
1646                 create_announced_chan_between_nodes(&nodes, 1, 2);
1647
1648                 // We retry payments in `process_pending_htlc_forwards`
1649                 nodes[0].node.process_pending_htlc_forwards();
1650                 check_added_monitors!(nodes[0], 1);
1651                 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1652                 assert_eq!(msg_events.len(), 1);
1653                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None);
1654                 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
1655         } else if test == AutoRetry::Spontaneous {
1656                 nodes[0].node.send_spontaneous_payment_with_retry(Some(payment_preimage), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1657                 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1658
1659                 // Open a new channel with liquidity on the second hop so we can find a route for the retry
1660                 // attempt, since the initial second hop channel will be excluded from pathfinding
1661                 create_announced_chan_between_nodes(&nodes, 1, 2);
1662
1663                 // We retry payments in `process_pending_htlc_forwards`
1664                 nodes[0].node.process_pending_htlc_forwards();
1665                 check_added_monitors!(nodes[0], 1);
1666                 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1667                 assert_eq!(msg_events.len(), 1);
1668                 pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, None, msg_events.pop().unwrap(), true, Some(payment_preimage));
1669                 claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
1670         } else if test == AutoRetry::FailAttempts {
1671                 // Ensure ChannelManager will not retry a payment if it has run out of payment attempts.
1672                 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1673                 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1674
1675                 // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
1676                 // the retry attempt, since the initial second hop channel will be excluded from pathfinding
1677                 let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
1678
1679                 // We retry payments in `process_pending_htlc_forwards`
1680                 nodes[0].node.process_pending_htlc_forwards();
1681                 pass_failed_attempt_with_retry_along_path!(channel_id_3, false);
1682
1683                 // Ensure we won't retry a second time.
1684                 nodes[0].node.process_pending_htlc_forwards();
1685                 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1686                 assert_eq!(msg_events.len(), 0);
1687         } else if test == AutoRetry::FailTimeout {
1688                 #[cfg(not(feature = "no-std"))] {
1689                         // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
1690                         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap();
1691                         pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1692
1693                         // Advance the time so the second attempt fails due to timeout.
1694                         SinceEpoch::advance(Duration::from_secs(61));
1695
1696                         // Make sure we don't retry again.
1697                         nodes[0].node.process_pending_htlc_forwards();
1698                         let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1699                         assert_eq!(msg_events.len(), 0);
1700
1701                         let mut events = nodes[0].node.get_and_clear_pending_events();
1702                         assert_eq!(events.len(), 1);
1703                         match events[0] {
1704                                 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1705                                         assert_eq!(payment_hash, *ev_payment_hash);
1706                                         assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1707                                 },
1708                                 _ => panic!("Unexpected event"),
1709                         }
1710                 }
1711         } else if test == AutoRetry::FailOnRestart {
1712                 // Ensure ChannelManager will not retry a payment after restart, even if there were retry
1713                 // attempts remaining prior to restart.
1714                 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
1715                 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1716
1717                 // Open a new channel with no liquidity on the second hop so we can find a (bad) route for
1718                 // the retry attempt, since the initial second hop channel will be excluded from pathfinding
1719                 let channel_id_3 = create_announced_chan_between_nodes(&nodes, 2, 1).2;
1720
1721                 // Ensure the first retry attempt fails, with 1 retry attempt remaining
1722                 nodes[0].node.process_pending_htlc_forwards();
1723                 pass_failed_attempt_with_retry_along_path!(channel_id_3, true);
1724
1725                 // Restart the node and ensure that ChannelManager does not use its remaining retry attempt
1726                 let node_encoded = nodes[0].node.encode();
1727                 let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode();
1728                 reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized);
1729
1730                 let mut events = nodes[0].node.get_and_clear_pending_events();
1731                 expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true);
1732                 // Make sure we don't retry again.
1733                 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1734                 assert_eq!(msg_events.len(), 0);
1735
1736                 let mut events = nodes[0].node.get_and_clear_pending_events();
1737                 assert_eq!(events.len(), 1);
1738                 match events[0] {
1739                         Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1740                                 assert_eq!(payment_hash, *ev_payment_hash);
1741                                 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1742                         },
1743                         _ => panic!("Unexpected event"),
1744                 }
1745         } else if test == AutoRetry::FailOnRetry {
1746                 nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
1747                 pass_failed_attempt_with_retry_along_path!(channel_id_2, true);
1748
1749                 // We retry payments in `process_pending_htlc_forwards`. Since the failing second-hop
1750                 // channel is excluded from pathfinding and no alternative was opened, we should fail to find a route.
1751                 nodes[0].node.process_pending_htlc_forwards();
1752                 let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1753                 assert_eq!(msg_events.len(), 0);
1754
1755                 let mut events = nodes[0].node.get_and_clear_pending_events();
1756                 assert_eq!(events.len(), 1);
1757                 match events[0] {
1758                         Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
1759                                 assert_eq!(payment_hash, *ev_payment_hash);
1760                                 assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
1761                         },
1762                         _ => panic!("Unexpected event"),
1763                 }
1764         }
1765 }
1766
1767 #[test]
1768 fn auto_retry_partial_failure() {
1769         // Test that we'll retry appropriately on send partial failure and retry partial failure.
1770         let chanmon_cfgs = create_chanmon_cfgs(2);
1771         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1772         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1773         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1774
1775         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1776         let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1777         let chan_3_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
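        // Three parallel channels between the same pair of nodes give the sender multiple candidate
        // paths to split the MPP payment across and to retry over.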
1778
1779         // Marshall data to send the payment
1780         let amt_msat = 20_000;
1781         let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
1782         #[cfg(feature = "std")]
1783         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1784         #[cfg(not(feature = "std"))]
1785         let payment_expiry_secs = 60 * 60;
1786         let mut invoice_features = InvoiceFeatures::empty();
1787         invoice_features.set_variable_length_onion_required();
1788         invoice_features.set_payment_secret_required();
1789         invoice_features.set_basic_mpp_optional();
1790         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
1791                 .with_expiry_time(payment_expiry_secs as u64)
1792                 .with_features(invoice_features);
1793         let route_params = RouteParameters {
1794                 payment_params,
1795                 final_value_msat: amt_msat,
1796         };
1797
1798         // Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
1799         // second (for the initial send path2 over chan_2) fails.
1800         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1801         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1802         // Ensure the third monitor update (for retry1's path1 over chan_1) succeeds, but the fourth (for
1803         // retry1's path2 over chan_3) fails, and that monitor updates succeed after that.
1804         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1805         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
1806         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
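             // (These five queued statuses are consumed, in order, by the five monitor updates the initial
             // send and the two retries below trigger; see the check_added_monitors!(nodes[0], 5) call
             // further down.)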
1807
1808         // Configure the initial send, retry1 and retry2's paths.
1809         let send_route = Route {
1810                 paths: vec![
1811                         vec![RouteHop {
1812                                 pubkey: nodes[1].node.get_our_node_id(),
1813                                 node_features: nodes[1].node.node_features(),
1814                                 short_channel_id: chan_1_id,
1815                                 channel_features: nodes[1].node.channel_features(),
1816                                 fee_msat: amt_msat / 2,
1817                                 cltv_expiry_delta: 100,
1818                         }],
1819                         vec![RouteHop {
1820                                 pubkey: nodes[1].node.get_our_node_id(),
1821                                 node_features: nodes[1].node.node_features(),
1822                                 short_channel_id: chan_2_id,
1823                                 channel_features: nodes[1].node.channel_features(),
1824                                 fee_msat: amt_msat / 2,
1825                                 cltv_expiry_delta: 100,
1826                         }],
1827                 ],
1828                 payment_params: Some(route_params.payment_params.clone()),
1829         };
1830         let retry_1_route = Route {
1831                 paths: vec![
1832                         vec![RouteHop {
1833                                 pubkey: nodes[1].node.get_our_node_id(),
1834                                 node_features: nodes[1].node.node_features(),
1835                                 short_channel_id: chan_1_id,
1836                                 channel_features: nodes[1].node.channel_features(),
1837                                 fee_msat: amt_msat / 4,
1838                                 cltv_expiry_delta: 100,
1839                         }],
1840                         vec![RouteHop {
1841                                 pubkey: nodes[1].node.get_our_node_id(),
1842                                 node_features: nodes[1].node.node_features(),
1843                                 short_channel_id: chan_3_id,
1844                                 channel_features: nodes[1].node.channel_features(),
1845                                 fee_msat: amt_msat / 4,
1846                                 cltv_expiry_delta: 100,
1847                         }],
1848                 ],
1849                 payment_params: Some(route_params.payment_params.clone()),
1850         };
1851         let retry_2_route = Route {
1852                 paths: vec![
1853                         vec![RouteHop {
1854                                 pubkey: nodes[1].node.get_our_node_id(),
1855                                 node_features: nodes[1].node.node_features(),
1856                                 short_channel_id: chan_1_id,
1857                                 channel_features: nodes[1].node.channel_features(),
1858                                 fee_msat: amt_msat / 4,
1859                                 cltv_expiry_delta: 100,
1860                         }],
1861                 ],
1862                 payment_params: Some(route_params.payment_params.clone()),
1863         };
1864         nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route));
1865         let mut payment_params = route_params.payment_params.clone();
1866         payment_params.previously_failed_channels.push(chan_2_id);
1867         nodes[0].router.expect_find_route(RouteParameters {
1868                         payment_params, final_value_msat: amt_msat / 2,
1869                 }, Ok(retry_1_route));
1870         let mut payment_params = route_params.payment_params.clone();
1871         payment_params.previously_failed_channels.push(chan_3_id);
1872         nodes[0].router.expect_find_route(RouteParameters {
1873                         payment_params, final_value_msat: amt_msat / 4,
1874                 }, Ok(retry_2_route));
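             // To recap the amounts set up above: the initial send is amt/2 over chan_1 plus amt/2 over
             // chan_2 (whose monitor update perm-fails), retry 1 re-sends that amt/2 as amt/4 over chan_1
             // plus amt/4 over chan_3 (which also perm-fails), and retry 2 sends the final amt/4 over
             // chan_1.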
1875
1876         // Send a payment that will partially fail on send, then partially fail on retry, then succeed.
1877         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap();
1878         let closed_chan_events = nodes[0].node.get_and_clear_pending_events();
1879         assert_eq!(closed_chan_events.len(), 4);
1880         match closed_chan_events[0] {
1881                 Event::ChannelClosed { .. } => {},
1882                 _ => panic!("Unexpected event"),
1883         }
1884         match closed_chan_events[1] {
1885                 Event::PaymentPathFailed { .. } => {},
1886                 _ => panic!("Unexpected event"),
1887         }
1888         match closed_chan_events[2] {
1889                 Event::ChannelClosed { .. } => {},
1890                 _ => panic!("Unexpected event"),
1891         }
1892         match closed_chan_events[3] {
1893                 Event::PaymentPathFailed { .. } => {},
1894                 _ => panic!("Unexpected event"),
1895         }
1896
1897         // Pass the first part of the payment along the path.
1898         check_added_monitors!(nodes[0], 5); // three outbound channel updates succeeded, two permanently failed
1899         let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
1900
1901         // The first message is the first update_add; the remaining messages broadcast channel updates
1902         // and error messages for the perm-failed channels.
1903         assert_eq!(msg_events.len(), 5);
1904         let mut payment_event = SendEvent::from_event(msg_events.remove(0));
1905
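             // Deliver the first HTLC to nodes[1]. Once nodes[0] handles nodes[1]'s RAA, the two retry
             // HTLCs are released, and the commitment dance below gets all three HTLCs (all over chan_1)
             // irrevocably committed.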
1906         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1907         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1908         check_added_monitors!(nodes[1], 1);
1909         let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1910
1911         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
1912         check_added_monitors!(nodes[0], 1);
1913         let as_second_htlc_updates = SendEvent::from_node(&nodes[0]);
1914
1915         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
1916         check_added_monitors!(nodes[0], 1);
1917         let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1918
1919         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
1920         check_added_monitors!(nodes[1], 1);
1921
1922         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]);
1923         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]);
1924         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg);
1925         check_added_monitors!(nodes[1], 1);
1926         let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1927
1928         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
1929         check_added_monitors!(nodes[0], 1);
1930
1931         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
1932         check_added_monitors!(nodes[0], 1);
1933         let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1934
1935         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
1936         check_added_monitors!(nodes[1], 1);
1937
1938         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1939         nodes[1].node.process_pending_htlc_forwards();
1940         expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat);
1941         nodes[1].node.claim_funds(payment_preimage);
1942         expect_payment_claimed!(nodes[1], payment_hash, amt_msat);
1943         let bs_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1944         assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1);
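             // The fulfills come back in two batches: one here, and the remaining two in
             // bs_second_claim_update below.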
1945
1946         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]);
1947         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed);
1948         check_added_monitors!(nodes[0], 1);
1949         let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1950
1951         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
1952         check_added_monitors!(nodes[1], 4);
1953         let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1954
1955         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
1956         check_added_monitors!(nodes[1], 1);
1957         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1958
1959         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
1960         check_added_monitors!(nodes[0], 1);
1961
1962         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]);
1963         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]);
1964         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed);
1965         check_added_monitors!(nodes[0], 1);
1966         let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1967
1968         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_fourth_raa);
1969         check_added_monitors!(nodes[1], 1);
1970
1971         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
1972         check_added_monitors!(nodes[1], 1);
1973         let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1974
1975         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
1976         check_added_monitors!(nodes[0], 1);
1977         expect_payment_sent!(nodes[0], payment_preimage);
1978 }
1979
1980 #[test]
1981 fn auto_retry_zero_attempts_send_error() {
1982         let chanmon_cfgs = create_chanmon_cfgs(2);
1983         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1984         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1985         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1986
1987         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1988         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1989
1990         // Marshal data to send the payment
1991         let amt_msat = 20_000;
1992         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
1993         #[cfg(feature = "std")]
1994         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
1995         #[cfg(not(feature = "std"))]
1996         let payment_expiry_secs = 60 * 60;
1997         let mut invoice_features = InvoiceFeatures::empty();
1998         invoice_features.set_variable_length_onion_required();
1999         invoice_features.set_payment_secret_required();
2000         invoice_features.set_basic_mpp_optional();
2001         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2002                 .with_expiry_time(payment_expiry_secs as u64)
2003                 .with_features(invoice_features);
2004         let route_params = RouteParameters {
2005                 payment_params,
2006                 final_value_msat: amt_msat,
2007         };
2008
2009         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
2010         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
2011         assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 2); // channel close messages
2012         let events = nodes[0].node.get_and_clear_pending_events();
2013         assert_eq!(events.len(), 3);
2014         if let Event::ChannelClosed { .. } = events[0] { } else { panic!(); }
2015         if let Event::PaymentPathFailed { .. } = events[1] { } else { panic!(); }
2016         if let Event::PaymentFailed { .. } = events[2] { } else { panic!(); }
2017         check_added_monitors!(nodes[0], 2);
2018 }
2019
2020 #[test]
2021 fn fails_paying_after_rejected_by_payee() {
2022         let chanmon_cfgs = create_chanmon_cfgs(2);
2023         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2024         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2025         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2026
2027         create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2028
2029         // Marshal data to send the payment
2030         let amt_msat = 20_000;
2031         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2032         #[cfg(feature = "std")]
2033         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2034         #[cfg(not(feature = "std"))]
2035         let payment_expiry_secs = 60 * 60;
2036         let mut invoice_features = InvoiceFeatures::empty();
2037         invoice_features.set_variable_length_onion_required();
2038         invoice_features.set_payment_secret_required();
2039         invoice_features.set_basic_mpp_optional();
2040         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2041                 .with_expiry_time(payment_expiry_secs as u64)
2042                 .with_features(invoice_features);
2043         let route_params = RouteParameters {
2044                 payment_params,
2045                 final_value_msat: amt_msat,
2046         };
2047
2048         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2049         check_added_monitors!(nodes[0], 1);
2050         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2051         assert_eq!(events.len(), 1);
2052         let mut payment_event = SendEvent::from_event(events.pop().unwrap());
2053         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
2054         check_added_monitors!(nodes[1], 0);
2055         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
2056         expect_pending_htlcs_forwardable!(nodes[1]);
2057         expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat);
2058
2059         nodes[1].node.fail_htlc_backwards(&payment_hash);
2060         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash }]);
2061         pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash);
2062 }
2063
2064 #[test]
2065 fn retry_multi_path_single_failed_payment() {
2066         // Tests that we can/will retry after a single path of an MPP payment failed immediately
2067         let chanmon_cfgs = create_chanmon_cfgs(2);
2068         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2069         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2070         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2071
2072         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2073         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2074
2075         let amt_msat = 100_010_000;
2076
2077         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2078         #[cfg(feature = "std")]
2079         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2080         #[cfg(not(feature = "std"))]
2081         let payment_expiry_secs = 60 * 60;
2082         let mut invoice_features = InvoiceFeatures::empty();
2083         invoice_features.set_variable_length_onion_required();
2084         invoice_features.set_payment_secret_required();
2085         invoice_features.set_basic_mpp_optional();
2086         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2087                 .with_expiry_time(payment_expiry_secs as u64)
2088                 .with_features(invoice_features);
2089         let route_params = RouteParameters {
2090                 payment_params: payment_params.clone(),
2091                 final_value_msat: amt_msat,
2092         };
2093
2094         let chans = nodes[0].node.list_usable_channels();
2095         let mut route = Route {
2096                 paths: vec![
2097                         vec![RouteHop {
2098                                 pubkey: nodes[1].node.get_our_node_id(),
2099                                 node_features: nodes[1].node.node_features(),
2100                                 short_channel_id: chans[0].short_channel_id.unwrap(),
2101                                 channel_features: nodes[1].node.channel_features(),
2102                                 fee_msat: 10_000,
2103                                 cltv_expiry_delta: 100,
2104                         }],
2105                         vec![RouteHop {
2106                                 pubkey: nodes[1].node.get_our_node_id(),
2107                                 node_features: nodes[1].node.node_features(),
2108                                 short_channel_id: chans[1].short_channel_id.unwrap(),
2109                                 channel_features: nodes[1].node.channel_features(),
2110                                 fee_msat: 100_000_001, // One msat more than our default max-HTLC-value of 10% of the channel value (100_000_000 msat)
2111                                 cltv_expiry_delta: 100,
2112                         }],
2113                 ],
2114                 payment_params: Some(payment_params),
2115         };
2116         nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2117         // On retry, split the payment across both channels.
2118         route.paths[0][0].fee_msat = 50_000_001;
2119         route.paths[1][0].fee_msat = 50_000_000;
2120         let mut pay_params = route.payment_params.clone().unwrap();
2121         pay_params.previously_failed_channels.push(chans[1].short_channel_id.unwrap());
2122         nodes[0].router.expect_find_route(RouteParameters {
2123                         payment_params: pay_params,
2124                         // Note that the second request here asks for the amount we originally failed to send,
2125                         // not the amount remaining on the full payment (a behavior which should eventually change).
2126                         final_value_msat: 100_000_001,
2127                 }, Ok(route.clone()));
2128
2129         {
2130                 let scorer = chanmon_cfgs[0].scorer.lock().unwrap();
2131                 // The initial send attempt, 2 paths
2132                 scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
2133                 scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
2134                 // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in.
2135                 scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown });
2136                 scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown });
2137         }
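             // (The 10_000 msat in-flight on chans[0] during the retry is the first path's HTLC, which
             // was sent successfully on the initial attempt.)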
2138
2139         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2140         let events = nodes[0].node.get_and_clear_pending_events();
2141         assert_eq!(events.len(), 1);
2142         match events[0] {
2143                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
2144                         failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
2145                         short_channel_id: Some(expected_scid), .. } =>
2146                 {
2147                         assert_eq!(payment_hash, ev_payment_hash);
2148                         assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
2149                         assert!(err_msg.contains("max HTLC"));
2150                 },
2151                 _ => panic!("Unexpected event"),
2152         }
2153         let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2154         assert_eq!(htlc_msgs.len(), 2);
2155         check_added_monitors!(nodes[0], 2);
2156 }
2157
2158 #[test]
2159 fn immediate_retry_on_failure() {
2160         // Tests that we can/will retry immediately after a failure
2161         let chanmon_cfgs = create_chanmon_cfgs(2);
2162         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2163         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2164         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2165
2166         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2167         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
2168
2169         let amt_msat = 100_000_001;
2170         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2171         #[cfg(feature = "std")]
2172         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2173         #[cfg(not(feature = "std"))]
2174         let payment_expiry_secs = 60 * 60;
2175         let mut invoice_features = InvoiceFeatures::empty();
2176         invoice_features.set_variable_length_onion_required();
2177         invoice_features.set_payment_secret_required();
2178         invoice_features.set_basic_mpp_optional();
2179         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2180                 .with_expiry_time(payment_expiry_secs as u64)
2181                 .with_features(invoice_features);
2182         let route_params = RouteParameters {
2183                 payment_params,
2184                 final_value_msat: amt_msat,
2185         };
2186
2187         let chans = nodes[0].node.list_usable_channels();
2188         let mut route = Route {
2189                 paths: vec![
2190                         vec![RouteHop {
2191                                 pubkey: nodes[1].node.get_our_node_id(),
2192                                 node_features: nodes[1].node.node_features(),
2193                                 short_channel_id: chans[0].short_channel_id.unwrap(),
2194                                 channel_features: nodes[1].node.channel_features(),
2195                                 fee_msat: 100_000_001, // One msat more than our default max-HTLC-value of 10% of the channel value (100_000_000 msat)
2196                                 cltv_expiry_delta: 100,
2197                         }],
2198                 ],
2199                 payment_params: Some(PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)),
2200         };
2201         nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2202         // On retry, split the payment across both channels.
2203         route.paths.push(route.paths[0].clone());
2204         route.paths[0][0].short_channel_id = chans[1].short_channel_id.unwrap();
2205         route.paths[0][0].fee_msat = 50_000_000;
2206         route.paths[1][0].fee_msat = 50_000_001;
2207         let mut pay_params = route_params.payment_params.clone();
2208         pay_params.previously_failed_channels.push(chans[0].short_channel_id.unwrap());
2209         nodes[0].router.expect_find_route(RouteParameters {
2210                         payment_params: pay_params, final_value_msat: amt_msat,
2211                 }, Ok(route.clone()));
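             // Unlike the previous test, the single initial path fails outright (it exceeds our max-HTLC
             // limit), so the retry is expected to request the full amt_msat again and split it across
             // both channels, per the second expected route above.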
2212
2213         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2214         let events = nodes[0].node.get_and_clear_pending_events();
2215         assert_eq!(events.len(), 1);
2216         match events[0] {
2217                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false,
2218                         failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { err: ref err_msg }},
2219                         short_channel_id: Some(expected_scid), .. } =>
2220                 {
2221                         assert_eq!(payment_hash, ev_payment_hash);
2222                         assert_eq!(expected_scid, route.paths[1][0].short_channel_id);
2223                         assert!(err_msg.contains("max HTLC"));
2224                 },
2225                 _ => panic!("Unexpected event"),
2226         }
2227         let htlc_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2228         assert_eq!(htlc_msgs.len(), 2);
2229         check_added_monitors!(nodes[0], 2);
2230 }
2231
2232 #[test]
2233 fn no_extra_retries_on_back_to_back_fail() {
2234         // In a previous release, we had a race where we could exceed the payment retry count if we
2235         // got two failures in a row, with the second indicating that all paths had failed (this field,
2236         // `all_paths_failed`, has since been removed).
2237         // Generally, when we give up trying to retry a payment, we don't know for sure what the
2238         // current state of the ChannelManager event queue is. Specifically, we cannot be sure that
2239         // there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events
2240         // pending which we will see later. Thus, when we previously removed the retry tracking map
2241         // entry after an `all_paths_failed` `PaymentPathFailed` event, we may have dropped the
2242         // retry entry even though more events for the same payment were still pending. This led to
2243         // us retrying a payment again even though we'd already given up on it.
2244         //
2245         // We now have a separate event - `PaymentFailed` which indicates no HTLCs remain and which
2246         // is used to remove the payment retry counter entries instead. This tests for the specific
2247         // excess-retry case while also testing `PaymentFailed` generation.
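             // Roughly, the setup below: both parts of the payment route over chan_1 -> chan_2 but pay
             // nodes[1] no fee, so nodes[1] fails both back. With Retry::Attempts(1) we then retry once,
             // over a single path for the full amount, which also fails, after which we expect a single
             // PaymentFailed event.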
2248
2249         let chanmon_cfgs = create_chanmon_cfgs(3);
2250         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2251         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2252         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2253
2254         let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2255         let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
2256
2257         let amt_msat = 200_000_000;
2258         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat);
2259         #[cfg(feature = "std")]
2260         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2261         #[cfg(not(feature = "std"))]
2262         let payment_expiry_secs = 60 * 60;
2263         let mut invoice_features = InvoiceFeatures::empty();
2264         invoice_features.set_variable_length_onion_required();
2265         invoice_features.set_payment_secret_required();
2266         invoice_features.set_basic_mpp_optional();
2267         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2268                 .with_expiry_time(payment_expiry_secs as u64)
2269                 .with_features(invoice_features);
2270         let route_params = RouteParameters {
2271                 payment_params,
2272                 final_value_msat: amt_msat,
2273         };
2274
2275         let mut route = Route {
2276                 paths: vec![
2277                         vec![RouteHop {
2278                                 pubkey: nodes[1].node.get_our_node_id(),
2279                                 node_features: nodes[1].node.node_features(),
2280                                 short_channel_id: chan_1_scid,
2281                                 channel_features: nodes[1].node.channel_features(),
2282                                 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2283                                 cltv_expiry_delta: 100,
2284                         }, RouteHop {
2285                                 pubkey: nodes[2].node.get_our_node_id(),
2286                                 node_features: nodes[2].node.node_features(),
2287                                 short_channel_id: chan_2_scid,
2288                                 channel_features: nodes[2].node.channel_features(),
2289                                 fee_msat: 100_000_000,
2290                                 cltv_expiry_delta: 100,
2291                         }],
2292                         vec![RouteHop {
2293                                 pubkey: nodes[1].node.get_our_node_id(),
2294                                 node_features: nodes[1].node.node_features(),
2295                                 short_channel_id: chan_1_scid,
2296                                 channel_features: nodes[1].node.channel_features(),
2297                                 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2298                                 cltv_expiry_delta: 100,
2299                         }, RouteHop {
2300                                 pubkey: nodes[2].node.get_our_node_id(),
2301                                 node_features: nodes[2].node.node_features(),
2302                                 short_channel_id: chan_2_scid,
2303                                 channel_features: nodes[2].node.channel_features(),
2304                                 fee_msat: 100_000_000,
2305                                 cltv_expiry_delta: 100,
2306                         }]
2307                 ],
2308                 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2309         };
2310         nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2311         let mut second_payment_params = route_params.payment_params.clone();
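             // Both path failures are attributed to chan_2 here, so its SCID is recorded twice in
             // `previously_failed_channels` (once per failed path):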
2312         second_payment_params.previously_failed_channels = vec![chan_2_scid, chan_2_scid];
2313         // On retry, we'll only return one path
2314         route.paths.remove(1);
2315         route.paths[0][1].fee_msat = amt_msat;
2316         nodes[0].router.expect_find_route(RouteParameters {
2317                         payment_params: second_payment_params,
2318                         final_value_msat: amt_msat,
2319                 }, Ok(route.clone()));
2320
2321         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2322         let htlc_updates = SendEvent::from_node(&nodes[0]);
2323         check_added_monitors!(nodes[0], 1);
2324         assert_eq!(htlc_updates.msgs.len(), 1);
2325
2326         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
2327         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
2328         check_added_monitors!(nodes[1], 1);
2329         let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2330
2331         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2332         check_added_monitors!(nodes[0], 1);
2333         let second_htlc_updates = SendEvent::from_node(&nodes[0]);
2334
2335         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2336         check_added_monitors!(nodes[0], 1);
2337         let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2338
2339         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
2340         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
2341         check_added_monitors!(nodes[1], 1);
2342         let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2343
2344         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2345         check_added_monitors!(nodes[1], 1);
2346         let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2347
2348         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2349         check_added_monitors!(nodes[0], 1);
2350
2351         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2352         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
2353         check_added_monitors!(nodes[0], 1);
2354         let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2355
2356         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2357         check_added_monitors!(nodes[1], 1);
2358         let bs_second_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2359
2360         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
2361         check_added_monitors!(nodes[1], 1);
2362         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2363
2364         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.update_fail_htlcs[0]);
2365         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_fail_update.commitment_signed);
2366         check_added_monitors!(nodes[0], 1);
2367
2368         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2369         check_added_monitors!(nodes[0], 1);
2370         let (as_third_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2371
2372         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_third_raa);
2373         check_added_monitors!(nodes[1], 1);
2374         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_fourth_cs);
2375         check_added_monitors!(nodes[1], 1);
2376         let bs_fourth_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2377
2378         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_fourth_raa);
2379         check_added_monitors!(nodes[0], 1);
2380
2381         // At this point A has sent two HTLCs which both failed due to lack of fee. It now has two
2382         // pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second
2383         // with it set.
2384         //
2385         // Previously, we retried payments in an event consumer, which would retry each
2386         // `PaymentPathFailed` individually. In that setup, we had retried the payment in response to
2387         // the first `PaymentPathFailed`, then seen the second `PaymentPathFailed` with
2388         // `all_paths_failed` set and assumed the payment was completely failed. We ultimately fixed it
2389         // by adding the `PaymentFailed` event.
2390         //
2391         // Because we now retry payments as a batch, we simply return a single-path route in the
2392         // second, batched, request, have that fail, and ensure the payment is abandoned.
2393         let mut events = nodes[0].node.get_and_clear_pending_events();
2394         assert_eq!(events.len(), 3);
2395         match events[0] {
2396                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
2397                         assert_eq!(payment_hash, ev_payment_hash);
2398                         assert_eq!(payment_failed_permanently, false);
2399                 },
2400                 _ => panic!("Unexpected event"),
2401         }
2402         match events[1] {
2403                 Event::PendingHTLCsForwardable { .. } => {},
2404                 _ => panic!("Unexpected event"),
2405         }
2406         match events[2] {
2407                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
2408                         assert_eq!(payment_hash, ev_payment_hash);
2409                         assert_eq!(payment_failed_permanently, false);
2410                 },
2411                 _ => panic!("Unexpected event"),
2412         }
2413
2414         nodes[0].node.process_pending_htlc_forwards();
2415         let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
2416         check_added_monitors!(nodes[0], 1);
2417
2418         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
2419         commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
2420         let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2421         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2422         commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true);
2423
2424         let mut events = nodes[0].node.get_and_clear_pending_events();
2425         assert_eq!(events.len(), 2);
2426         match events[0] {
2427                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
2428                         assert_eq!(payment_hash, ev_payment_hash);
2429                         assert_eq!(payment_failed_permanently, false);
2430                 },
2431                 _ => panic!("Unexpected event"),
2432         }
2433         match events[1] {
2434                 Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id } => {
2435                         assert_eq!(payment_hash, *ev_payment_hash);
2436                         assert_eq!(PaymentId(payment_hash.0), *ev_payment_id);
2437                 },
2438                 _ => panic!("Unexpected event"),
2439         }
2440 }
2441
2442 #[test]
2443 fn test_simple_partial_retry() {
2444         // In the first version of the in-`ChannelManager` payment retries, retries were sent for the
2445         // full amount of the payment, rather than only the missing amount. Here we simply test for
2446         // this by sending a payment with two parts, failing one, and retrying only the failed part. Note that
2447         // `TestRouter` will check that the `RouteParameters` (which contain the amount) matches the
2448         // request.
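             // Roughly, the setup below: the payment is split into two parts of 100M msat each over
             // chan_1 -> chan_2. The first part pays nodes[1] no fee and gets failed back, the second is
             // forwarded on to nodes[2], and the retry should only request the missing amt/2, which
             // TestRouter verifies against the expected RouteParameters.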
2449         let chanmon_cfgs = create_chanmon_cfgs(3);
2450         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2451         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2452         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2453
2454         let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2455         let chan_2_scid = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0).0.contents.short_channel_id;
2456
2457         let amt_msat = 200_000_000;
2458         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
2459         #[cfg(feature = "std")]
2460         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2461         #[cfg(not(feature = "std"))]
2462         let payment_expiry_secs = 60 * 60;
2463         let mut invoice_features = InvoiceFeatures::empty();
2464         invoice_features.set_variable_length_onion_required();
2465         invoice_features.set_payment_secret_required();
2466         invoice_features.set_basic_mpp_optional();
2467         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2468                 .with_expiry_time(payment_expiry_secs as u64)
2469                 .with_features(invoice_features);
2470         let route_params = RouteParameters {
2471                 payment_params,
2472                 final_value_msat: amt_msat,
2473         };
2474
2475         let mut route = Route {
2476                 paths: vec![
2477                         vec![RouteHop {
2478                                 pubkey: nodes[1].node.get_our_node_id(),
2479                                 node_features: nodes[1].node.node_features(),
2480                                 short_channel_id: chan_1_scid,
2481                                 channel_features: nodes[1].node.channel_features(),
2482                                 fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee
2483                                 cltv_expiry_delta: 100,
2484                         }, RouteHop {
2485                                 pubkey: nodes[2].node.get_our_node_id(),
2486                                 node_features: nodes[2].node.node_features(),
2487                                 short_channel_id: chan_2_scid,
2488                                 channel_features: nodes[2].node.channel_features(),
2489                                 fee_msat: 100_000_000,
2490                                 cltv_expiry_delta: 100,
2491                         }],
2492                         vec![RouteHop {
2493                                 pubkey: nodes[1].node.get_our_node_id(),
2494                                 node_features: nodes[1].node.node_features(),
2495                                 short_channel_id: chan_1_scid,
2496                                 channel_features: nodes[1].node.channel_features(),
2497                                 fee_msat: 100_000,
2498                                 cltv_expiry_delta: 100,
2499                         }, RouteHop {
2500                                 pubkey: nodes[2].node.get_our_node_id(),
2501                                 node_features: nodes[2].node.node_features(),
2502                                 short_channel_id: chan_2_scid,
2503                                 channel_features: nodes[2].node.channel_features(),
2504                                 fee_msat: 100_000_000,
2505                                 cltv_expiry_delta: 100,
2506                         }]
2507                 ],
2508                 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2509         };
2510         nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2511         let mut second_payment_params = route_params.payment_params.clone();
2512         second_payment_params.previously_failed_channels = vec![chan_2_scid];
2513         // On retry, we'll only be asked for one path (i.e. the missing 100k sats)
2514         route.paths.remove(0);
2515         nodes[0].router.expect_find_route(RouteParameters {
2516                         payment_params: second_payment_params,
2517                         final_value_msat: amt_msat / 2,
2518                 }, Ok(route.clone()));
2519
2520         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
2521         let htlc_updates = SendEvent::from_node(&nodes[0]);
2522         check_added_monitors!(nodes[0], 1);
2523         assert_eq!(htlc_updates.msgs.len(), 1);
2524
2525         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &htlc_updates.msgs[0]);
2526         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &htlc_updates.commitment_msg);
2527         check_added_monitors!(nodes[1], 1);
2528         let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2529
2530         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2531         check_added_monitors!(nodes[0], 1);
2532         let second_htlc_updates = SendEvent::from_node(&nodes[0]);
2533
2534         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2535         check_added_monitors!(nodes[0], 1);
2536         let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2537
2538         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]);
2539         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg);
2540         check_added_monitors!(nodes[1], 1);
2541         let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2542
2543         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2544         check_added_monitors!(nodes[1], 1);
2545         let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2546
2547         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2548         check_added_monitors!(nodes[0], 1);
2549
2550         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]);
2551         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_fail_update.commitment_signed);
2552         check_added_monitors!(nodes[0], 1);
2553         let (as_second_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2554
2555         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2556         check_added_monitors!(nodes[1], 1);
2557
2558         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_third_cs);
2559         check_added_monitors!(nodes[1], 1);
2560
2561         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2562
2563         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2564         check_added_monitors!(nodes[0], 1);
2565
2566         let mut events = nodes[0].node.get_and_clear_pending_events();
2567         assert_eq!(events.len(), 2);
2568         match events[0] {
2569                 Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, ..  } => {
2570                         assert_eq!(payment_hash, ev_payment_hash);
2571                         assert_eq!(payment_failed_permanently, false);
2572                 },
2573                 _ => panic!("Unexpected event"),
2574         }
2575         match events[1] {
2576                 Event::PendingHTLCsForwardable { .. } => {},
2577                 _ => panic!("Unexpected event"),
2578         }
2579
2580         nodes[0].node.process_pending_htlc_forwards();
2581         let retry_htlc_updates = SendEvent::from_node(&nodes[0]);
2582         check_added_monitors!(nodes[0], 1);
2583
2584         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]);
2585         commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true);
2586
2587         expect_pending_htlcs_forwardable!(nodes[1]);
2588         check_added_monitors!(nodes[1], 1);
2589
2590         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
2591         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
2592         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
2593         commitment_signed_dance!(nodes[2], nodes[1], &bs_forward_update.commitment_signed, false);
2594
2595         expect_pending_htlcs_forwardable!(nodes[2]);
2596         expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat);
2597 }
2598
2599 #[test]
2600 #[cfg(feature = "std")]
2601 fn test_threaded_payment_retries() {
2602         // In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
2603         // a single thread and would happily let multiple threads run retries at the same time. Because
2604         // retries are done by first calculating the amount we need to retry, then dropping the
2605         // relevant lock, then actually sending, multiple threads could end up retrying the same
2606         // amount at the same time, over-paying the original payment!
2607         let chanmon_cfgs = create_chanmon_cfgs(4);
2608         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2609         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2610         let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2611
2612         // There is one mitigating guardrail when retrying payments - we can never over-pay by more
2613         // than 10% of the original value. Thus, we want all our retries to be below that. In order to
2614         // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
2615         // out over channels 3+4. This will let us ignore 99.9% of the payment value and deal with only
2616         // our channel.
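             // Concretely, with amt_msat = 100_000_000 below, the HTLC we repeatedly retry over channel 1
             // is amt_msat / 1000 = 100_000 msat, far below the 10% (10_000_000 msat) over-payment
             // guardrail.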
2617         let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id;
2618         create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0);
2619         let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id;
2620         let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id;
2621
2622         let amt_msat = 100_000_000;
2623         let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat);
2624         #[cfg(feature = "std")]
2625         let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60;
2626         #[cfg(not(feature = "std"))]
2627         let payment_expiry_secs = 60 * 60;
2628         let mut invoice_features = InvoiceFeatures::empty();
2629         invoice_features.set_variable_length_onion_required();
2630         invoice_features.set_payment_secret_required();
2631         invoice_features.set_basic_mpp_optional();
2632         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
2633                 .with_expiry_time(payment_expiry_secs as u64)
2634                 .with_features(invoice_features);
2635         let mut route_params = RouteParameters {
2636                 payment_params,
2637                 final_value_msat: amt_msat,
2638         };
2639
2640         let mut route = Route {
2641                 paths: vec![
2642                         vec![RouteHop {
2643                                 pubkey: nodes[1].node.get_our_node_id(),
2644                                 node_features: nodes[1].node.node_features(),
2645                                 short_channel_id: chan_1_scid,
2646                                 channel_features: nodes[1].node.channel_features(),
2647                                 fee_msat: 0,
2648                                 cltv_expiry_delta: 100,
2649                         }, RouteHop {
2650                                 pubkey: nodes[3].node.get_our_node_id(),
2651                                 node_features: nodes[3].node.node_features(),
2652                                 short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown
2653                                 channel_features: nodes[3].node.channel_features(),
2654                                 fee_msat: amt_msat / 1000,
2655                                 cltv_expiry_delta: 100,
2656                         }],
2657                         vec![RouteHop {
2658                                 pubkey: nodes[2].node.get_our_node_id(),
2659                                 node_features: nodes[2].node.node_features(),
2660                                 short_channel_id: chan_3_scid,
2661                                 channel_features: nodes[2].node.channel_features(),
2662                                 fee_msat: 100_000,
2663                                 cltv_expiry_delta: 100,
2664                         }, RouteHop {
2665                                 pubkey: nodes[3].node.get_our_node_id(),
2666                                 node_features: nodes[3].node.node_features(),
2667                                 short_channel_id: chan_4_scid,
2668                                 channel_features: nodes[3].node.channel_features(),
2669                                 fee_msat: amt_msat - amt_msat / 1000,
2670                                 cltv_expiry_delta: 100,
2671                         }]
2672                 ],
2673                 payment_params: Some(PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)),
2674         };
2675         nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
2676
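        // Send with an effectively unbounded retry count (0xdeadbeef attempts) so the test exercises
        // the retry machinery itself rather than retry exhaustion.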
2677         nodes[0].node.send_payment_with_retry(payment_hash, &Some(payment_secret), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap();
2678         check_added_monitors!(nodes[0], 2);
2679         let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
2680         assert_eq!(send_msg_events.len(), 2);
2681         send_msg_events.retain(|msg|
2682                 if let MessageSendEvent::UpdateHTLCs { node_id, .. } = msg {
2683                         // Drop the commitment update for nodes[2]; we can just let that one sit pending
2684                         // forever.
2685                         *node_id == nodes[1].node.get_our_node_id()
2686                 } else { panic!(); }
2687         );
2688
2689         // From here on out, the retry `RouteParameters` amount will be amt_msat / 1000
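        // Only the amt_msat / 1000 riding on the failed (bogus-SCID) path needs to be retried, so
        // drop the second path from the route we hand back to the mocked router.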
2690         route_params.final_value_msat /= 1000;
2691         route.paths.pop();
2692
2693         let end_time = Instant::now() + Duration::from_secs(1);
2694         macro_rules! thread_body { () => { {
2695                 // We really want std::thread::scope, but it's not stable until 1.63. Until then, we make do with unsafe.
2696                 let node_ref = NodePtr::from_node(&nodes[0]);
2697                 move || {
2698                         let node_a = unsafe { &*node_ref.0 };
2699                         while Instant::now() < end_time {
2700                                 node_a.node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
2701                                 // Whether or not we have any pending events, always pretend we just got a
2702                                 // PendingHTLCsForwardable event
2703                                 node_a.node.process_pending_htlc_forwards();
2704                         }
2705                 }
2706         } } }
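        // Spawn 16 threads which all hammer `process_pending_htlc_forwards`, racing the retry logic
        // against the message handling in the main thread below.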
2707         let mut threads = Vec::new();
2708         for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); }
2709
2710         // Back in the main thread, poll pending messages and make sure that we never have more than
2711         // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
2712         // there are HTLC messages shoved in while it's running. This allows us to test that we never
2713         // generate an additional update_add_htlc until we've fully failed the first.
2714         let mut previously_failed_channels = Vec::new();
2715         loop {
2716                 assert_eq!(send_msg_events.len(), 1);
2717                 let send_event = SendEvent::from_event(send_msg_events.pop().unwrap());
2718                 assert_eq!(send_event.msgs.len(), 1);
2719
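                // Deliver the lone pending update_add_htlc to nodes[1], which will fail the HTLC
                // back since the onion asks it to forward over an SCID it doesn't know.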
2720                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2721                 commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
2722
2723                 // Note that we only push one route into `expect_find_route` at a time, because that's all
2724                 // the retries (should) need. If the bug is reintroduced, "real" routes may be selected, but
2725                 // we should still ultimately fail for the same reason - because we're trying to send too
2726                 // many HTLCs at once.
2727                 let mut new_route_params = route_params.clone();
2728                 previously_failed_channels.push(route.paths[0][1].short_channel_id);
2729                 new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone();
2730                 route.paths[0][1].short_channel_id += 1;
2731                 nodes[0].router.expect_find_route(new_route_params, Ok(route.clone()));
2732
2733                 let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2734                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]);
2735                 // The "normal" commitment_signed_dance delivers the final RAA and then calls
2736                 // `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
2737                 // This races with our other threads, which may generate an add-HTLCs commitment update via
2738                 // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
2739                 // *we've* called `process_pending_htlc_forwards`, when it's guaranteed to have two updates.
2740                 let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true);
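                // Deliver the final RAA by hand; once nodes[0] processes it the failed HTLC is fully
                // removed, freeing the retry to add a new HTLC via `process_pending_htlc_forwards`.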
2741                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &last_raa);
2742
2743                 let cur_time = Instant::now();
2744                 if cur_time > end_time {
2745                         for thread in threads.drain(..) { thread.join().unwrap(); }
2746                 }
2747
2748                 // Make sure we have some events to handle when we go around...
2749                 nodes[0].node.get_and_clear_pending_events(); // wipe the PendingHTLCsForwardable
2750                 nodes[0].node.process_pending_htlc_forwards();
2751                 send_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
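                // Two monitor updates are expected: one from the final RAA handled above and one from
                // the commitment update carrying the retried HTLC.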
2752                 check_added_monitors!(nodes[0], 2);
2753
2754                 if cur_time > end_time {
2755                         break;
2756                 }
2757         }
2758 }
2759
2760 fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
2761         // Test that if we reload in the middle of an HTLC claim commitment_signed dance, we'll still
2762         // receive the PaymentSent event even if the ChannelManager had no idea about the payment when
2763         // it was last persisted.
2764         let chanmon_cfgs = create_chanmon_cfgs(2);
2765         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2766         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2767         let (persister_a, persister_b, persister_c);
2768         let (chain_monitor_a, chain_monitor_b, chain_monitor_c);
2769         let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c);
2770         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2771
2772         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
2773
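        // Depending on the test variant, snapshot nodes[0]'s ChannelManager either before or after
        // the payment is sent, so the reloaded manager may or may not know about the payment at all.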
2774         let mut nodes_0_serialized = Vec::new();
2775         if !persist_manager_with_payment {
2776                 nodes_0_serialized = nodes[0].node.encode();
2777         }
2778
2779         let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2780
2781         if persist_manager_with_payment {
2782                 nodes_0_serialized = nodes[0].node.encode();
2783         }
2784
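        // nodes[1] claims the payment; nodes[0] handles the resulting update_fulfill_htlc and
        // commitment_signed but is then reloaded from the stale serialization before the claim
        // dance completes.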
2785         nodes[1].node.claim_funds(our_payment_preimage);
2786         check_added_monitors!(nodes[1], 1);
2787         expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
2788
2789         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2790         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2791         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
2792         check_added_monitors!(nodes[0], 1);
2793
2794         // The ChannelMonitor should always be the latest version, as we're required to persist it
2795         // during commitment_signed handling.
2796         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2797         reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized);
2798
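        // On reload the stale ChannelManager force-closes the channel, but the payment preimage
        // learned via the up-to-date ChannelMonitor must still surface as a PaymentSent event.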
2799         let events = nodes[0].node.get_and_clear_pending_events();
2800         assert_eq!(events.len(), 2);
2801         if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); }
2802         if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
2803         // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
2804         // the double-claim that would otherwise appear at the end of this test.
2805         nodes[0].node.timer_tick_occurred();
2806         let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2807         assert_eq!(as_broadcasted_txn.len(), 1);
2808
2809         // Ensure that, even after some time, if we restart we still include *something* in the current
2810         // `ChannelManager` which prevents a spurious `PaymentFailed` on restart, even after the resolved
2811         // pending payments have been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
2812         // A naive implementation of the fix here would wipe the pending payments set, causing a
2813         // failure event when we restart.
2814         for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
2815
2816         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2817         reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b);
2818         let events = nodes[0].node.get_and_clear_pending_events();
2819         assert!(events.is_empty());
2820
2821         // Ensure that we don't generate any further events even after the channel-closing commitment
2822         // transaction is confirmed on-chain.
2823         confirm_transaction(&nodes[0], &as_broadcasted_txn[0]);
2824         for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); }
2825
2826         let events = nodes[0].node.get_and_clear_pending_events();
2827         assert!(events.is_empty());
2828
2829         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2830         reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c);
2831         let events = nodes[0].node.get_and_clear_pending_events();
2832         assert!(events.is_empty());
2833 }
2834
2835 #[test]
2836 fn no_missing_sent_on_midpoint_reload() {
2837         do_no_missing_sent_on_midpoint_reload(false);
2838         do_no_missing_sent_on_midpoint_reload(true);
2839 }