// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Tests of the payment retry logic in ChannelManager, including various edge cases around
//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.

use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::chain::transaction::OutPoint;
use crate::chain::keysinterface::KeysInterface;
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
use crate::ln::channelmanager::{self, BREAKDOWN_TIMEOUT, ChannelManager, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS};
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::routing::gossip::RoutingFees;
use crate::routing::router::{get_route, PaymentParameters, RouteHint, RouteHintHop, RouteParameters};
use crate::util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider};
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::Writeable;

use bitcoin::{Block, BlockHeader, TxMerkleNode};
use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;

use crate::prelude::*;

use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;

#[test]
fn retry_single_path_payment() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	// Rebalance to find a route
	send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);

	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);

	// Rebalance so that the first hop fails.
	send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);

	// Make sure the payment fails on the first hop.
	let payment_id = PaymentId(payment_hash.0);
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
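	// Deliver the commitment_signed and complete the resulting revoke_and_ack/commitment_signed
	// round trip between the two nodes.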
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[1], 1);
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	// Rebalance the channel so the retry succeeds.
	send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);

	// Mine two blocks (we expire retries after 3, so this will check that we don't expire early)
	connect_blocks(&nodes[0], 2);

	// Retry the payment and make sure it succeeds.
	nodes[0].node.retry_payment(&route, payment_id).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 100_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
}

#[test]
fn mpp_failure() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;

	let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
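	// Duplicate the router-provided path and point the two copies at distinct first hops (nodes[1]
	// and nodes[2]) so the payment is split into a two-part MPP terminating at nodes[3].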
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_id;
	route.paths[0][1].short_channel_id = chan_3_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_id;
	route.paths[1][1].short_channel_id = chan_4_id;
	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
}

#[test]
fn mpp_retry() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	// Rebalance
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 1_000_000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	let payment_id = PaymentId(payment_hash.0);
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), payment_id).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the success path.
	let success_path_msgs = events.remove(0);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);

	// Add the HTLC along the first hop.
	let fail_path_msgs_1 = events.remove(0);
	let (update_add, commitment_signed) = match fail_path_msgs_1 {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(update_add_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
			(update_add_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};
	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
	commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);

	// Attempt to forward the payment and complete the 2nd path's failure.
	expect_pending_htlcs_forwardable!(&nodes[2]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[2], 1);
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	// Rebalance the channel so the second half of the payment can succeed.
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	// Make sure it errors as expected given a too-large amount.
	if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, payment_id) {
		assert!(err.contains("over total_payment_amt_msat"));
	} else { panic!("Unexpected error"); }

	// Make sure it errors as expected given the wrong payment_id.
	if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId([0; 32])) {
		assert!(err.contains("not found"));
	} else { panic!("Unexpected error"); }

	// Retry the second half of the payment and make sure it succeeds.
	let mut path = route.clone();
	path.paths.remove(0);
	nodes[0].node.retry_payment(&path, payment_id).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}

fn do_mpp_receive_timeout(send_partial_mpp: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the first path.
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), false, None);

	if send_partial_mpp {
		// Time out the partial MPP
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		// Failed HTLC from node 3 -> 1
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
		let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
		nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
		check_added_monitors!(nodes[3], 1);
		commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);

		// Failed HTLC from node 1 -> 0
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
		let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
		check_added_monitors!(nodes[1], 1);
		commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);

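		// The receiver fails the timed-out part back with empty failure data; 23 is the BOLT 4
		// `mpp_timeout` failure code.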
		expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
	} else {
		// Pass half of the payment along the second path.
		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), events.remove(0), true, None);

		// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
	}
}

#[test]
fn mpp_receive_timeout() {
	do_mpp_receive_timeout(true);
	do_mpp_receive_timeout(false);
}

#[test]
fn retry_expired_payment() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	// Rebalance to find a route
	send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000);

	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000);

	// Rebalance so that the first hop fails.
	send_payment(&nodes[1], &vec!(&nodes[2])[..], 2_000_000);

	// Make sure the payment fails on the first hop.
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[1], 1);
	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false);
	expect_payment_failed!(nodes[0], payment_hash, false);

	// Mine blocks so the payment will have expired.
	connect_blocks(&nodes[0], 3);
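	// (As noted in retry_single_path_payment above, retries are expired after three blocks, so the
	// payment is now past its retry window.)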

	// Retry the payment and make sure it errors as expected.
	if let Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { err })) = nodes[0].node.retry_payment(&route, PaymentId(payment_hash.0)) {
		assert!(err.contains("not found"));
	} else {
		panic!("Unexpected error");
	}
}

#[test]
fn no_pending_leak_on_initial_send_failure() {
	// In an earlier version of our payment tracking, we'd have a retry entry even when the initial
	// HTLC for the payment failed to send due to local channel errors (e.g. peer disconnected). In this
	// case, the user wouldn't have a PaymentId to retry the payment with, but we'd think we have a
	// pending payment forever and never time it out.
	// Here we test exactly that - sending a payment while the peer is disconnected on the first
	// try, and then checking that no pending payment is being tracked.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	unwrap_send_err!(nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)),
		true, APIError::ChannelUnavailable { ref err },
		assert_eq!(err, "Peer for first hop currently disconnected/pending monitor update!"));

	assert!(!nodes[0].node.has_pending_payments());
}

fn do_retry_with_no_persist(confirm_before_reload: bool) {
	// If we send a pending payment and `send_payment` returns success, we should always either
	// return a payment failure event or a payment success event, and on failure the payment should
	// be retryable.
	//
	// In order to do so when the ChannelManager isn't immediately persisted (which is normal - it's
	// always persisted asynchronously), the ChannelManager has to reload some payment data from
	// ChannelMonitor(s) in some cases. This tests that reloading.
	//
	// `confirm_before_reload` confirms the channel-closing commitment transaction on-chain prior
	// to reloading the ChannelManager, increasing test coverage in ChannelMonitor HTLC tracking
	// which has separate codepaths for "commitment transaction already confirmed" and not.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
	let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	// Serialize the ChannelManager prior to sending payments
	let nodes_0_serialized = nodes[0].node.encode();

	// Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
	// out and retry.
	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);
	let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	// We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
	// which would prevent retry.
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
	let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
	if confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	}

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(as_broadcasted_txn.len(), 1);
	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
			check_added_monitors!(nodes[1], 1);
			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
	// we close in a moment.
	nodes[2].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);

	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
	check_added_monitors!(nodes[1], 1);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);

	if confirm_before_reload {
		let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
		nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	mine_transaction(&nodes[1], &as_commitment_tx);
	let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(bs_htlc_claim_txn.len(), 1);
	check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);

	if !confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
	}
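	// Confirming nodes[1]'s HTLC claim on nodes[0]'s chain reveals the preimage for the first
	// payment, generating the PaymentSent event below.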
	mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
	expect_payment_sent!(nodes[0], payment_preimage_1);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20);
	let as_htlc_timeout_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(as_htlc_timeout_txn.len(), 2);
	let (first_htlc_timeout_tx, second_htlc_timeout_tx) = (&as_htlc_timeout_txn[0], &as_htlc_timeout_txn[1]);
	check_spends!(first_htlc_timeout_tx, as_commitment_tx);
	check_spends!(second_htlc_timeout_tx, as_commitment_tx);
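	// Only one of the two HTLC-Timeouts corresponds to the still-pending second payment; the other
	// conflicts with nodes[1]'s already-mined claim of the first HTLC, so confirm whichever one
	// does not share that input.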
	if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
		confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
	} else {
		confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
	}
	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
	// reloaded) via a route over the new channel, which should work without issue and eventually be
	// received and claimed at the recipient just like any other payment.
	let (mut new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);

	// Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
	// and not the original fee. We also update node[1]'s relevant config as
	// do_claim_payment_along_route expects us to never overpay.
	{
		let mut channel_state = nodes[1].node.channel_state.lock().unwrap();
		let mut channel = channel_state.by_id.get_mut(&chan_id_2).unwrap();
		let mut new_config = channel.config();
		new_config.forwarding_fee_base_msat += 100_000;
		channel.update_config(&new_config);
		new_route.paths[0][0].fee_msat += 100_000;
	}

	// Force expiration of the channel's previous config.
	for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
		nodes[1].node.timer_tick_occurred();
	}

	assert!(nodes[0].node.retry_payment(&new_route, payment_id_1).is_err()); // Shouldn't be allowed to retry a fulfilled payment
	nodes[0].node.retry_payment(&new_route, PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
	expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0][0].fee_msat));
}

#[test]
fn retry_with_no_persist() {
	do_retry_with_no_persist(true);
	do_retry_with_no_persist(false);
}

fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
	// Test that an off-chain completed payment is not retryable on restart. This was previously
	// broken for dust payments, but we test for both dust and non-dust payments.
	//
	// `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
	// output at all.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

	let mut manually_accept_config = test_default_channel_config();
	manually_accept_config.manually_accept_inbound_channels = true;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]);

	let first_persister: test_utils::TestPersister;
	let first_new_chain_monitor: test_utils::TestChainMonitor;
	let first_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let second_persister: test_utils::TestPersister;
	let second_new_chain_monitor: test_utils::TestChainMonitor;
	let second_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let third_persister: test_utils::TestPersister;
	let third_new_chain_monitor: test_utils::TestChainMonitor;
	let third_nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	// Because we set nodes[1] to manually accept channels, just open a 0-conf channel.
	let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	confirm_transaction(&nodes[0], &funding_tx);
	confirm_transaction(&nodes[1], &funding_tx);
	// Ignore the announcement_signatures messages
	nodes[0].node.get_and_clear_pending_msg_events();
	nodes[1].node.get_and_clear_pending_msg_events();
	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;

	// Serialize the ChannelManager prior to sending payments
	let mut nodes_0_serialized = nodes[0].node.encode();

	let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0;
	let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 });

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();

	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	let bs_commitment_tx;
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_string() });
			check_added_monitors!(nodes[1], 1);
			bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
	// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
	// incoming HTLCs with the same payment hash later.
	nodes[2].node.fail_htlc_backwards(&payment_hash);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[2], 1);

	let htlc_fail_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fail_updates.commitment_signed, false);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
		[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);

	// Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
	// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
	// after the commitment transaction, so always connect the commitment transaction.
	mine_transaction(&nodes[0], &bs_commitment_tx[0]);
	mine_transaction(&nodes[1], &bs_commitment_tx[0]);
	if !use_dust {
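		// Connect enough blocks to reach the HTLC's CLTV expiry (the final CLTV plus the single
		// intermediate hop's delta) so that nodes[0] broadcasts its HTLC-Timeout.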
		connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1 + (MIN_CLTV_EXPIRY_DELTA as u32));
		let as_htlc_timeout = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		check_spends!(as_htlc_timeout[0], bs_commitment_tx[0]);
		assert_eq!(as_htlc_timeout.len(), 1);

		mine_transaction(&nodes[0], &as_htlc_timeout[0]);
		// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
		mine_transaction(&nodes[1], &as_htlc_timeout[0]);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	// We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
	let (_, chan_id_3) = open_zero_conf_channel(&nodes[0], &nodes[1], None);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
	// confirming, we will fail as it's considered still-pending...
	let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 });
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
	// again. We serialize the node first as we'll then test retrying the HTLC after a restart
	// (which should also still work).
	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
	// We set mpp_parts_remain to avoid having abandon_payment called
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
	nodes_0_serialized = nodes[0].node.encode();

	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
	assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
	// the payment is not (spuriously) listed as still pending.
	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_ok());
	check_added_monitors!(nodes[0], 1);
	pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret);
	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);

	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode();
	nodes_0_serialized = nodes[0].node.encode();

	// Ensure that after reload we cannot retry the payment.
	reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

	assert!(nodes[0].node.retry_payment(&new_route, payment_id).is_err());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}

#[test]
fn test_completed_payment_not_retryable_on_reload() {
	do_test_completed_payment_not_retryable_on_reload(true);
	do_test_completed_payment_not_retryable_on_reload(false);
}


fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) {
	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
	// the ChannelMonitor tells it to.
	//
	// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
	// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
	// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());

	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
	// nodes[0].
	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

	// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(node_txn.len(), 3);
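	// The broadcast set is the commitment transaction (broadcast twice) plus the HTLC-Timeout
	// spending it.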
739         assert_eq!(node_txn[0], node_txn[1]);
740         check_spends!(node_txn[1], funding_tx);
741         check_spends!(node_txn[2], node_txn[1]);
742         let timeout_txn = vec![node_txn[2].clone()];
743
744         nodes[1].node.claim_funds(payment_preimage);
745         check_added_monitors!(nodes[1], 1);
746         expect_payment_claimed!(nodes[1], payment_hash, 10_000_000);
747
748         let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
749         connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
750         check_closed_broadcast!(nodes[1], true);
751         check_added_monitors!(nodes[1], 1);
752         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
753         let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
754         assert_eq!(claim_txn.len(), 3);
755         check_spends!(claim_txn[0], node_txn[1]);
756         check_spends!(claim_txn[1], funding_tx);
757         check_spends!(claim_txn[2], claim_txn[1]);
758
759         header.prev_blockhash = nodes[0].best_block_hash();
760         connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
761
762         if confirm_commitment_tx {
763                 connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1);
764         }
765
766         header.prev_blockhash = nodes[0].best_block_hash();
767         let claim_block = Block { header, txdata: if payment_timeout { timeout_txn } else { vec![claim_txn[0].clone()] } };
768
769         if payment_timeout {
770                 assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV!
771                 connect_block(&nodes[0], &claim_block);
772                 connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2);
773         }
774
775         // Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
776         // returning InProgress. This should cause the claim event to never make its way to the
777         // ChannelManager.
778         chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
779         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
780
781         if payment_timeout {
782                 connect_blocks(&nodes[0], 1);
783         } else {
784                 connect_block(&nodes[0], &claim_block);
785         }
786
787         let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
788         let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
789                 .get_mut(&funding_txo).unwrap().drain().collect();
790         // If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
791         // If we're testing connection idempotency we may get substantially more.
792         assert!(mon_updates.len() >= 1);
793         assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
794         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
795
796         // If we persist the ChannelManager here, we should get the PaymentSent event after
797         // deserialization.
798         let mut chan_manager_serialized = Vec::new();
799         if !persist_manager_post_event {
800                 chan_manager_serialized = nodes[0].node.encode();
801         }
802
803         // Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
804         // payment sent event.
805         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
806         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
807         for update in mon_updates {
808                 nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
809         }
810         if payment_timeout {
811                 expect_payment_failed!(nodes[0], payment_hash, false);
812         } else {
813                 expect_payment_sent!(nodes[0], payment_preimage);
814         }
815
816         // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
817         // twice.
818         if persist_manager_post_event {
819                 chan_manager_serialized = nodes[0].node.encode();
820         }
821
822         // Now reload nodes[0]...
823         reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
824
825         if persist_manager_post_event {
826                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
827         } else if payment_timeout {
828                 expect_payment_failed!(nodes[0], payment_hash, false);
829         } else {
830                 expect_payment_sent!(nodes[0], payment_preimage);
831         }
832
833         // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
834         // which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
835         // payment events should kick in, leaving us with no pending events here.
836         let height = nodes[0].blocks.lock().unwrap().len() as u32 - 1;
837         nodes[0].chain_monitor.chain_monitor.block_connected(&claim_block, height);
838         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
839 }
840
841 #[test]
842 fn test_dup_htlc_onchain_fails_on_reload() {
843         do_test_dup_htlc_onchain_fails_on_reload(true, true, true);
844         do_test_dup_htlc_onchain_fails_on_reload(true, true, false);
845         do_test_dup_htlc_onchain_fails_on_reload(true, false, false);
846         do_test_dup_htlc_onchain_fails_on_reload(false, true, true);
847         do_test_dup_htlc_onchain_fails_on_reload(false, true, false);
848         do_test_dup_htlc_onchain_fails_on_reload(false, false, false);
849 }
850
851 #[test]
852 fn test_fulfill_restart_failure() {
853         // When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully
854         // fulfilled. At this point, the peer can reconnect and decide to either fulfill the HTLC
855         // again, or fail it, giving us free money.
856         //
857         // Of course, they probably won't fail it and give us free money, but because we have code to
858         // handle that case, we should test the logic for it anyway. We do that here.
859         let chanmon_cfgs = create_chanmon_cfgs(2);
860         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
861         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
862         let persister: test_utils::TestPersister;
863         let new_chain_monitor: test_utils::TestChainMonitor;
864         let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
865         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
866
867         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
868         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
869
870         // The simplest way to get a failure after a fulfill is to reload nodes[1] from a pre-fulfill
871         // state, which we do by serializing it here.
872         let chan_manager_serialized = nodes[1].node.encode();
873         let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode();
874
875         nodes[1].node.claim_funds(payment_preimage);
876         check_added_monitors!(nodes[1], 1);
877         expect_payment_claimed!(nodes[1], payment_hash, 100_000);
878
879         let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
880         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
881         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
882
883         // Now reload nodes[1]...
884         reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
885
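        // On reconnect, the reloaded nodes[1] has no memory of the claim, so channel_reestablish won't
        // replay the fulfill; it just brings the channel back up.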
886         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
887         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
888
889         nodes[1].node.fail_htlc_backwards(&payment_hash);
890         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
891         check_added_monitors!(nodes[1], 1);
892         let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
893         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]);
894         commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false);
895         // nodes[0] shouldn't generate any events here: while it just got a payment failure completion,
896         // it had already considered the payment fulfilled, and now it has effectively been given free money.
897         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
898 }
899
900 #[test]
901 fn get_ldk_payment_preimage() {
902         // Ensure that `ChannelManager::get_payment_preimage` can successfully be used to claim a payment.
903         let chanmon_cfgs = create_chanmon_cfgs(2);
904         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
905         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
906         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
907         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
908
909         let amt_msat = 60_000;
910         let expiry_secs = 60 * 60;
911         let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs).unwrap();
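        // Note that create_inbound_payment keeps the preimage internal and does not return it; we
        // recover it below via `get_payment_preimage`.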
912
913         let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
914                 .with_features(channelmanager::provided_invoice_features());
915         let scorer = test_utils::TestScorer::with_penalty(0);
916         let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
917         let random_seed_bytes = keys_manager.get_secure_random_bytes();
918         let route = get_route(
919                 &nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
920                 Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
921                 amt_msat, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
922         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
923         check_added_monitors!(nodes[0], 1);
924
925         // Make sure the payment can be claimed with the preimage fetched via `get_payment_preimage`.
926         let payment_preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
927         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
928         assert_eq!(events.len(), 1);
929         pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
930         claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, payment_preimage);
931 }
932
933 #[test]
934 fn sent_probe_is_probe_of_sending_node() {
935         let chanmon_cfgs = create_chanmon_cfgs(3);
936         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
937         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
938         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
939
940         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
941         create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
942
943         // First check we refuse to build a single-hop probe
944         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
945         assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err());
946
947         // Then build an actual two-hop probing path
948         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
949
950         match nodes[0].node.send_probe(route.paths[0].clone()) {
951                 Ok((payment_hash, payment_id)) => {
952                         assert!(nodes[0].node.payment_is_probe(&payment_hash, &payment_id));
953                         assert!(!nodes[1].node.payment_is_probe(&payment_hash, &payment_id));
954                         assert!(!nodes[2].node.payment_is_probe(&payment_hash, &payment_id));
955                 },
956                 _ => panic!(),
957         }
958
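        // Drain the probe's update_add_htlc (and account for the resulting monitor update) so the test
        // ends without unexpected pending messages.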
959         get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
960         check_added_monitors!(nodes[0], 1);
961 }
962
963 #[test]
964 fn successful_probe_yields_event() {
965         let chanmon_cfgs = create_chanmon_cfgs(3);
966         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
967         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
968         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
969
970         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
971         create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
972
973         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000);
974
975         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
976
977         // node[0] -- update_add_htlcs -> node[1]
978         check_added_monitors!(nodes[0], 1);
979         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
980         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
981         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
982         check_added_monitors!(nodes[1], 0);
983         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
984         expect_pending_htlcs_forwardable!(nodes[1]);
985
986         // node[1] -- update_add_htlcs -> node[2]
987         check_added_monitors!(nodes[1], 1);
988         let updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
989         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
990         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &probe_event.msgs[0]);
991         check_added_monitors!(nodes[2], 0);
992         commitment_signed_dance!(nodes[2], nodes[1], probe_event.commitment_msg, true, true);
993
994         // node[1] <- update_fail_htlcs -- node[2]
995         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
996         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
997         check_added_monitors!(nodes[1], 0);
998         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, true);
999
1000         // node[0] <- update_fail_htlcs -- node[1]
1001         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1002         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
1003         check_added_monitors!(nodes[0], 0);
1004         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1005
1006         let mut events = nodes[0].node.get_and_clear_pending_events();
1007         assert_eq!(events.len(), 1);
1008         match events.drain(..).next().unwrap() {
1009                 crate::util::events::Event::ProbeSuccessful { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
1010                         assert_eq!(payment_id, ev_pid);
1011                         assert_eq!(payment_hash, ev_ph);
1012                 },
1013                 _ => panic!(),
1014         };
1015 }
1016
1017 #[test]
1018 fn failed_probe_yields_event() {
1019         let chanmon_cfgs = create_chanmon_cfgs(3);
1020         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1021         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1022         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1023
1024         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1025         create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000, channelmanager::provided_init_features(), channelmanager::provided_init_features());
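        // The 1 <-> 2 channel is opened with most of its value pushed to nodes[2], leaving nodes[1] with
        // too little spendable balance to forward the 9_998_000 msat probe, so it fails it back instead.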
1026
1027         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
1028
1029         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 9_998_000, 42);
1030
1031         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
1032
1033         // node[0] -- update_add_htlcs -> node[1]
1034         check_added_monitors!(nodes[0], 1);
1035         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1036         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
1037         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
1038         check_added_monitors!(nodes[1], 0);
1039         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
1040         expect_pending_htlcs_forwardable!(nodes[1]);
1041
1042         // node[0] <- update_fail_htlcs -- node[1]
1043         check_added_monitors!(nodes[1], 1);
1044         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1045         // Skip the PendingHTLCsForwardable event
1046         let _events = nodes[1].node.get_and_clear_pending_events();
1047         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
1048         check_added_monitors!(nodes[0], 0);
1049         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1050
1051         let mut events = nodes[0].node.get_and_clear_pending_events();
1052         assert_eq!(events.len(), 1);
1053         match events.drain(..).next().unwrap() {
1054                 crate::util::events::Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
1055                         assert_eq!(payment_id, ev_pid);
1056                         assert_eq!(payment_hash, ev_ph);
1057                 },
1058                 _ => panic!(),
1059         };
1060 }
1061
1062 #[test]
1063 fn onchain_failed_probe_yields_event() {
1064         // Tests that an attempt to probe over a channel that is eventually closed results in a failure
1065         // event.
1066         let chanmon_cfgs = create_chanmon_cfgs(3);
1067         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1068         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1069         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1070
1071         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1072         create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1073
1074         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id());
1075
1076         // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
1077         let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], &payment_params, 1_000, 42);
1078         let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
1079
1080         // node[0] -- update_add_htlcs -> node[1]
1081         check_added_monitors!(nodes[0], 1);
1082         let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1083         let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), updates);
1084         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &probe_event.msgs[0]);
1085         check_added_monitors!(nodes[1], 0);
1086         commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false);
1087         expect_pending_htlcs_forwardable!(nodes[1]);
1088
1089         check_added_monitors!(nodes[1], 1);
1090         let _ = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1091
1092         // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
1093         // nodes[0], which after 6 confirmations should result in a probe failure event.
1094         let bs_txn = get_local_commitment_txn!(nodes[1], chan_id);
1095         confirm_transaction(&nodes[0], &bs_txn[0]);
1096         check_closed_broadcast!(&nodes[0], true);
1097         check_added_monitors!(nodes[0], 1);
1098
1099         let mut events = nodes[0].node.get_and_clear_pending_events();
1100         assert_eq!(events.len(), 2);
1101         let mut found_probe_failed = false;
1102         for event in events.drain(..) {
1103                 match event {
1104                         Event::ProbeFailed { payment_id: ev_pid, payment_hash: ev_ph, .. } => {
1105                                 assert_eq!(payment_id, ev_pid);
1106                                 assert_eq!(payment_hash, ev_ph);
1107                                 found_probe_failed = true;
1108                         },
1109                         Event::ChannelClosed { .. } => {},
1110                         _ => panic!(),
1111                 }
1112         }
1113         assert!(found_probe_failed);
1114 }
1115
1116 #[test]
1117 fn claimed_send_payment_idempotent() {
1118         // Tests that `send_payment` (and friends) are (reasonably) idempotent.
1119         let chanmon_cfgs = create_chanmon_cfgs(2);
1120         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1121         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1122         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1123
1124         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1125
1126         let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1127         let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1128
1129         macro_rules! check_send_rejected {
1130                 () => {
1131                         // If we try to resend a new payment with a different payment_hash but with the same
1132                         // payment_id, it should be rejected.
1133                         let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1134                         match send_result {
1135                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1136                                 _ => panic!("Unexpected send result: {:?}", send_result),
1137                         }
1138
1139                         // Further, if we try to send a spontaneous payment with the same payment_id it should
1140                         // also be rejected.
1141                         let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1142                         match send_result {
1143                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1144                                 _ => panic!("Unexpected send result: {:?}", send_result),
1145                         }
1146                 }
1147         }
1148
1149         check_send_rejected!();
1150
1151         // Claim the payment backwards, but note that the PaymentSent event is still pending and has
1152         // not been seen by the user. At this point, from the user's perspective nothing has changed, so
1153         // we must remain just as idempotent as we were before.
1154         do_claim_payment_along_route(&nodes[0], &[&[&nodes[1]]], false, first_payment_preimage);
1155
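        // Even if the idempotency timeout would otherwise have elapsed, the PaymentId stays reserved
        // because the user hasn't handled the PaymentSent event yet.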
1156         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1157                 nodes[0].node.timer_tick_occurred();
1158         }
1159
1160         check_send_rejected!();
1161
1162         // Once the user sees and handles the `PaymentSent` event, we expect them to no longer call
1163         // `send_payment`, and our idempotency guarantees are off - they should have atomically marked
1164         // the payment complete. However, they could have called `send_payment` while the event was
1165         // being processed, leading to a race in our idempotency guarantees. Thus, even immediately
1166         // after the event is handled a duplicate payment should still be rejected.
1167         expect_payment_sent!(&nodes[0], first_payment_preimage, Some(0));
1168         check_send_rejected!();
1169
1170         // If relatively little time has passed, a duplicate payment should still fail.
1171         nodes[0].node.timer_tick_occurred();
1172         check_send_rejected!();
1173
1174         // However, after some time has passed (at least more than the one timer tick above), a
1175         // duplicate payment should go through, as ChannelManager should no longer have any remaining
1176         // references to the old payment data.
1177         for _ in 0..IDEMPOTENCY_TIMEOUT_TICKS {
1178                 nodes[0].node.timer_tick_occurred();
1179         }
1180
1181         nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1182         check_added_monitors!(nodes[0], 1);
1183         pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1184         claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1185 }
1186
1187 #[test]
1188 fn abandoned_send_payment_idempotent() {
1189         // Tests that `send_payment` (and friends) allow duplicate PaymentIds immediately after
1190         // abandon_payment.
1191         let chanmon_cfgs = create_chanmon_cfgs(2);
1192         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1193         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1194         let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1195
1196         create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1197
1198         let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
1199         let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
1200
1201         macro_rules! check_send_rejected {
1202                 () => {
1203                         // If we try to resend a new payment with a different payment_hash but with the same
1204                         // payment_id, it should be rejected.
1205                         let send_result = nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id);
1206                         match send_result {
1207                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1208                                 _ => panic!("Unexpected send result: {:?}", send_result),
1209                         }
1210
1211                         // Further, if we try to send a spontaneous payment with the same payment_id it should
1212                         // also be rejected.
1213                         let send_result = nodes[0].node.send_spontaneous_payment(&route, None, payment_id);
1214                         match send_result {
1215                                 Err(PaymentSendFailure::DuplicatePayment) => {},
1216                                 _ => panic!("Unexpected send result: {:?}", send_result),
1217                         }
1218                 }
1219         }
1220
1221         check_send_rejected!();
1222
1223         nodes[1].node.fail_htlc_backwards(&first_payment_hash);
1224         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
1225
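        // Fail the payment all the way back, but deliberately skip abandon_payment so the PaymentId
        // remains reserved.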
1226         pass_failed_payment_back_no_abandon(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash);
1227         check_send_rejected!();
1228
1229         // Until we abandon the payment, no matter how many timer ticks pass, we still cannot reuse the
1230         // PaymentId.
1231         for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
1232                 nodes[0].node.timer_tick_occurred();
1233         }
1234         check_send_rejected!();
1235
1236         nodes[0].node.abandon_payment(payment_id);
1237         get_event!(nodes[0], Event::PaymentFailed);
1238
1239         // However, we can reuse the PaymentId immediately after we `abandon_payment`.
1240         nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret), payment_id).unwrap();
1241         check_added_monitors!(nodes[0], 1);
1242         pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret);
1243         claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage);
1244 }
1245
1246 #[derive(PartialEq)]
1247 enum InterceptTest {
1248         Forward,
1249         Fail,
1250         Timeout,
1251 }
1252
1253 #[test]
1254 fn test_trivial_inflight_htlc_tracking(){
1255         // In this test, we test three scenarios:
1256         // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs
1257         // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs
1258         // (3) After we claim the payment sent in (2), InFlightHtlcs should return `None` for the query.
1259         let chanmon_cfgs = create_chanmon_cfgs(3);
1260         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1261         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1262         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1263
1264         let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1265         let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features());
1266
1267         // Send and claim the payment. Inflight HTLCs should be empty.
1268         send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
1269         {
1270                 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1271
1272                 let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
1273                 let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
1274                 let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
1275                 let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
1276
1277                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1278                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1279                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1280                         channel_1.get_short_channel_id().unwrap()
1281                 );
1282                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1283                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1284                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1285                         channel_2.get_short_channel_id().unwrap()
1286                 );
1287
1288                 assert_eq!(chan_1_used_liquidity, None);
1289                 assert_eq!(chan_2_used_liquidity, None);
1290         }
1291
1292         // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
1293         let (payment_preimage, _, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 500000);
1294         {
1295                 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1296
1297                 let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
1298                 let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
1299                 let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
1300                 let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
1301
1302                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1303                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1304                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1305                         channel_1.get_short_channel_id().unwrap()
1306                 );
1307                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1308                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1309                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1310                         channel_2.get_short_channel_id().unwrap()
1311                 );
1312
1313                 // First hop accounts for expected 1000 msat fee
1314                 assert_eq!(chan_1_used_liquidity, Some(501000));
1315                 assert_eq!(chan_2_used_liquidity, Some(500000));
1316         }
1317
1318         // Now, let's claim the payment. This should result in the used liquidity returning `None`.
1319         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
1320         {
1321                 let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1322
1323                 let node_0_channel_lock = nodes[0].node.channel_state.lock().unwrap();
1324                 let node_1_channel_lock = nodes[1].node.channel_state.lock().unwrap();
1325                 let channel_1 = node_0_channel_lock.by_id.get(&chan_1_id).unwrap();
1326                 let channel_2 = node_1_channel_lock.by_id.get(&chan_2_id).unwrap();
1327
1328                 let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
1329                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1330                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1331                         channel_1.get_short_channel_id().unwrap()
1332                 );
1333                 let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
1334                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1335                         &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
1336                         channel_2.get_short_channel_id().unwrap()
1337                 );
1338
1339                 assert_eq!(chan_1_used_liquidity, None);
1340                 assert_eq!(chan_2_used_liquidity, None);
1341         }
1342 }
1343
1344 #[test]
1345 fn test_holding_cell_inflight_htlcs() {
1346         let chanmon_cfgs = create_chanmon_cfgs(2);
1347         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1348         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1349         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1350         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1351
1352         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1353         let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1354
1355         // Queue up two payments - one will be delivered right away, one immediately goes into the
1356         // holding cell as nodes[0] is AwaitingRAA.
1357         {
1358                 nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1359                 check_added_monitors!(nodes[0], 1);
1360                 nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1361                 check_added_monitors!(nodes[0], 0);
1362         }
1363
1364         let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
1365
1366         {
1367                 let channel_lock = nodes[0].node.channel_state.lock().unwrap();
1368                 let channel = channel_lock.by_id.get(&channel_id).unwrap();
1369
1370                 let used_liquidity = inflight_htlcs.used_liquidity_msat(
1371                         &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
1372                         &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
1373                         channel.get_short_channel_id().unwrap()
1374                 );
1375
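                // Both the committed HTLC and the one still sitting in the holding cell count towards
                // in-flight liquidity (2 * 1_000_000 msat).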
1376                 assert_eq!(used_liquidity, Some(2000000));
1377         }
1378
1379         // Clear pending events so the test doesn't throw a "Had excess message on node..." error.
1380         nodes[0].node.get_and_clear_pending_msg_events();
1381 }
1382
1383 #[test]
1384 fn intercepted_payment() {
1385         // Test that detecting an intercept scid on payment forward will signal LDK to generate an
1386         // intercept event, which the LSP can then use to either (a) open a JIT channel to forward the
1387         // payment or (b) fail the payment.
1388         do_test_intercepted_payment(InterceptTest::Forward);
1389         do_test_intercepted_payment(InterceptTest::Fail);
1390         // Make sure that intercepted payments will be automatically failed back if too many blocks pass.
1391         do_test_intercepted_payment(InterceptTest::Timeout);
1392 }
1393
1394 fn do_test_intercepted_payment(test: InterceptTest) {
1395         let chanmon_cfgs = create_chanmon_cfgs(3);
1396         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1397
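        // nodes[1] (the intercepting node) must opt into intercepting HTLCs via accept_intercept_htlcs,
        // while nodes[2] manually accepts inbound channels so the JIT channel below can be zero-conf.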
1398         let mut zero_conf_chan_config = test_default_channel_config();
1399         zero_conf_chan_config.manually_accept_inbound_channels = true;
1400         let mut intercept_forwards_config = test_default_channel_config();
1401         intercept_forwards_config.accept_intercept_htlcs = true;
1402         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]);
1403
1404         let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1405         let scorer = test_utils::TestScorer::with_penalty(0);
1406         let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
1407
1408         let _ = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
1409
1410         let amt_msat = 100_000;
1411         let intercept_scid = nodes[1].node.get_intercept_scid();
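        // Put the intercept SCID in a route hint so nodes[0] routes the payment to a not-yet-existing
        // hop behind nodes[1], triggering the interception.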
1412         let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
1413                 .with_route_hints(vec![
1414                         RouteHint(vec![RouteHintHop {
1415                                 src_node_id: nodes[1].node.get_our_node_id(),
1416                                 short_channel_id: intercept_scid,
1417                                 fees: RoutingFees {
1418                                         base_msat: 1000,
1419                                         proportional_millionths: 0,
1420                                 },
1421                                 cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA,
1422                                 htlc_minimum_msat: None,
1423                                 htlc_maximum_msat: None,
1424                         }])
1425                 ])
1426                 .with_features(channelmanager::provided_invoice_features());
1427         let route_params = RouteParameters {
1428                 payment_params,
1429                 final_value_msat: amt_msat,
1430                 final_cltv_expiry_delta: TEST_FINAL_CLTV,
1431         };
1432         let route = get_route(
1433                 &nodes[0].node.get_our_node_id(), &route_params.payment_params,
1434                 &nodes[0].network_graph.read_only(), None, route_params.final_value_msat,
1435                 route_params.final_cltv_expiry_delta, nodes[0].logger, &scorer, &random_seed_bytes
1436         ).unwrap();
1437
1438         let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60).unwrap();
1439         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
1440         let payment_event = {
1441                 {
1442                         let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
1443                         assert_eq!(added_monitors.len(), 1);
1444                         added_monitors.clear();
1445                 }
1446                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1447                 assert_eq!(events.len(), 1);
1448                 SendEvent::from_event(events.remove(0))
1449         };
1450         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1451         commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true);
1452
1453         // Check that we generate the HTLCIntercepted event when an intercept forward is detected.
1454         let events = nodes[1].node.get_and_clear_pending_events();
1455         assert_eq!(events.len(), 1);
1456         let (intercept_id, expected_outbound_amount_msat) = match events[0] {
1457                 crate::util::events::Event::HTLCIntercepted {
1458                         intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id
1459                 } => {
1460                         assert_eq!(pmt_hash, payment_hash);
1461                         assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees());
1462                         assert_eq!(short_channel_id, intercept_scid);
1463                         (intercept_id, expected_outbound_amount_msat)
1464                 },
1465                 _ => panic!()
1466         };
1467
1468         // Check for unknown channel id error.
1469         let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &[42; 32], nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1470         assert_eq!(unknown_chan_id_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not found", log_bytes!([42; 32])) });
1471
1472         if test == InterceptTest::Fail {
1473                 // Ensure we can fail the intercepted payment back.
1474                 nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap();
1475                 expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::UnknownNextHop { requested_forward_scid: intercept_scid }]);
1476                 nodes[1].node.process_pending_htlc_forwards();
1477                 let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1478                 check_added_monitors!(&nodes[1], 1);
1479                 assert!(update_fail.update_fail_htlcs.len() == 1);
1480                 let fail_msg = update_fail.update_fail_htlcs[0].clone();
1481                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
1482                 commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false);
1483
1484                 // Ensure the payment fails with the expected error.
1485                 let fail_conditions = PaymentFailedConditions::new()
1486                         .blamed_scid(intercept_scid)
1487                         .blamed_chan_closed(true)
1488                         .expected_htlc_error_data(0x4000 | 10, &[]);
1489                 expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions);
1490         } else if test == InterceptTest::Forward {
1491                 // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet.
1492                 let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
1493                 let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1494                 assert_eq!(unusable_chan_err, APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(temp_chan_id)) });
1495                 assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
1496
1497                 // Open the just-in-time channel so the payment can then be forwarded.
1498                 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1499
1500                 // Finally, forward the intercepted payment through and claim it.
1501                 nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap();
1502                 expect_pending_htlcs_forwardable!(nodes[1]);
1503
1504                 let payment_event = {
1505                         {
1506                                 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1507                                 assert_eq!(added_monitors.len(), 1);
1508                                 added_monitors.clear();
1509                         }
1510                         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
1511                         assert_eq!(events.len(), 1);
1512                         SendEvent::from_event(events.remove(0))
1513                 };
1514                 nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
1515                 commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true);
1516                 expect_pending_htlcs_forwardable!(nodes[2]);
1517
1518                 let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
1519                 expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id());
1520                 do_claim_payment_along_route(&nodes[0], &vec!(&vec!(&nodes[1], &nodes[2])[..]), false, payment_preimage);
1521                 let events = nodes[0].node.get_and_clear_pending_events();
1522                 assert_eq!(events.len(), 2);
1523                 match events[0] {
1524                         Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
1525                                 assert_eq!(payment_preimage, *ev_preimage);
1526                                 assert_eq!(payment_hash, *ev_hash);
1527                                 assert_eq!(fee_paid_msat, &Some(1000));
1528                         },
1529                         _ => panic!("Unexpected event")
1530                 }
1531                 match events[1] {
1532                         Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
1533                                 assert_eq!(hash, Some(payment_hash));
1534                         },
1535                         _ => panic!("Unexpected event")
1536                 }
1537         } else if test == InterceptTest::Timeout {
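                // Connect enough blocks that the intercepted HTLC expires, causing nodes[1] to
                // automatically fail it back to nodes[0].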
1538                 let mut block = Block {
1539                         header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
1540                         txdata: vec![],
1541                 };
1542                 connect_block(&nodes[0], &block);
1543                 connect_block(&nodes[1], &block);
1544                 for _ in 0..TEST_FINAL_CLTV {
1545                         block.header.prev_blockhash = block.block_hash();
1546                         connect_block(&nodes[0], &block);
1547                         connect_block(&nodes[1], &block);
1548                 }
1549                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::InvalidForward { requested_forward_scid: intercept_scid }]);
1550                 check_added_monitors!(nodes[1], 1);
1551                 let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1552                 assert!(htlc_timeout_updates.update_add_htlcs.is_empty());
1553                 assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1);
1554                 assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty());
1555                 assert!(htlc_timeout_updates.update_fee.is_none());
1556
1557                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
1558                 commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
1559                 expect_payment_failed!(nodes[0], payment_hash, false, 0x2000 | 2, []);
1560
1561                 // Check for unknown intercept id error.
1562                 let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
1563                 let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
1564                 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1565                 let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err();
1566                 assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
1567         }
1568 }