// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.
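//!
//! For orientation, the statuses exercised below are `Completed` (persistence finished),
//! `InProgress` (persistence still pending; the channel is frozen until
//! `force_channel_monitor_updated` signals completion), and `PermanentFailure` (the channel
//! must be force-closed).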

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;

use crate::ln::functional_test_utils::*;

use crate::util::test_utils;

use crate::io;
use bitcoin::hashes::Hash;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};

#[test]
fn test_simple_monitor_permanent_update_fail() {
	// Test that we handle a simple permanent monitor update failure
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
		), true, APIError::ChannelUnavailable {..}, {});
	check_added_monitors!(nodes[0], 2);
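	// A PermanentFailure force-closes the channel immediately, which is why two monitor
	// updates are registered here: the rejected commitment update and the update applied on
	// closure (explanatory note; the count above is what the test asserts).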

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 2);
	match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	};
	match events_1[1] {
		MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
		_ => panic!("Unexpected event"),
	};

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn test_monitor_and_persister_update_fail() {
	// Test that if both updating the `ChannelMonitor` and persisting the updated
	// `ChannelMonitor` fail, then it's the failure from updating the `ChannelMonitor`
	// that gets returned.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };

	// Rebalance the network to generate an HTLC in both directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

	// Make a copy of the ChainMonitor so we can capture the error it returns on a
	// bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
	// directly, the node would fail to be `Drop`'d at the end because its
	// ChannelManager and ChainMonitor would be out of sync.
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
	let persister = test_utils::TestPersister::new();
	let tx_broadcaster = TestBroadcaster {
		txn_broadcasted: Mutex::new(Vec::new()),
		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
		// that we are at height 200 so that it doesn't think we're violating the time lock
		// requirements of transactions broadcasted at that point.
		blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
	};
	let chain_mon = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
		chain_mon
	};
	chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);

	// Set the persister's return value to be an InProgress.
	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	// Try to update ChannelMonitor
	nodes[1].node.claim_funds(preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
	check_added_monitors!(nodes[1], 1);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
			// Check that even though the persister is returning an InProgress,
			// because the update is bogus, ultimately the error that's returned
			// should be a PermanentFailure.
			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
			logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
		} else { assert!(false); }
	}

	check_added_monitors!(nodes[0], 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
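	// The single pending event should be the PaymentSent generated by the fulfill delivered
	// above (explanatory note; the test only asserts the count).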
}

fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
	// Test that we can recover from a simple temporary monitor update failure optionally with
	// a disconnect in between
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	{
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
				RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
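	// Mark the pending persist as complete and poke the ChainMonitor with the latest update id
	// so the channel unfreezes and the held send is finally released.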
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let payment_event = SendEvent::from_event(events_2.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_3 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_1, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_1, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);

	// Now set it to failed again...
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	}

	// ...and make sure we can force-close a frozen channel
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_broadcast!(nodes[0], true);

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
}

#[test]
fn test_simple_monitor_temporary_update_fail() {
	do_test_simple_monitor_temporary_update_fail(false);
	do_test_simple_monitor_temporary_update_fail(true);
}

fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
	let disconnect_flags = 8 | 16;
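	// Bits 8 and 16 are behavior modifiers (message-ordering swap and deferred fulfill
	// delivery, per the walkthrough below); the remaining low bits count how many
	// disconnect/reconnect cycles we perform.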

	// Test that we can recover from a temporary monitor update failure with some in-flight
	// HTLCs going on at the same time, potentially with some disconnection thrown in.
	// * First we route a payment, then get a temporary monitor update failure when trying to
	//   route a second payment. We then claim the first payment.
	// * If disconnect_count is set, we will disconnect at this point (which is likely, as
	//   InProgress typically indicates a network disconnect which resulted in a failure to
	//   update the ChannelMonitor on a watchtower).
	// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
	//   immediately, otherwise we wait for the disconnect and deliver them via the reconnect
	//   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
	//   disconnect_count & !disconnect_flags is 0).
	// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
	//   through message sending, potentially disconnecting/reconnecting multiple times based on
	//   disconnect_count, to get the update_fulfill_htlc through.
	// * We then walk through more message exchanges to get the original update_add_htlc
	//   through, swapping message ordering based on disconnect_count & 8 and optionally
	//   disconnecting/reconnecting based on disconnect_count.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Now try to send a second payment which will fail to send
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
	// but nodes[0] won't respond since it is frozen.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			if (disconnect_count & 16) == 0 {
				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
				let events_3 = nodes[0].node.get_and_clear_pending_events();
				assert_eq!(events_3.len(), 1);
				match events_3[0] {
					Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
						assert_eq!(*payment_preimage, payment_preimage_1);
						assert_eq!(*payment_hash, payment_hash_1);
					},
					_ => panic!("Unexpected event"),
				}

				nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
				check_added_monitors!(nodes[0], 1);
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			}

			(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};

	if disconnect_count & !disconnect_flags > 0 {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	}

	// Now fix monitor updating...
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

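	// Helper: reconnects the peers and returns (reestablish_1, reestablish_2, as_resp, bs_resp),
	// where each `*_resp` is the tuple handle_chan_reestablish_msgs! builds; per the test utils
	// this is (channel_ready, revoke_and_ack, commitment_update, RAA/CS delivery order).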
	macro_rules! disconnect_reconnect_peers { () => { {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		(reestablish_1, reestablish_2, as_resp, bs_resp)
	} } }

	let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		check_added_monitors!(nodes[0], 0);
		let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		check_added_monitors!(nodes[1], 0);
		let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		assert!(bs_resp.1.is_none());
		if (disconnect_count & 16) == 0 {
			assert!(bs_resp.2.is_none());

			assert!(as_resp.1.is_some());
			assert!(as_resp.2.is_some());
			assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
		} else {
			assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
			assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
			assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);

			assert!(as_resp.1.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
			let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);

			as_resp.1 = Some(as_resp_raa);
			bs_resp.2 = None;
		}

		if disconnect_count & !disconnect_flags > 1 {
			let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();

			if (disconnect_count & 16) == 0 {
				assert!(reestablish_1 == second_reestablish_1);
				assert!(reestablish_2 == second_reestablish_2);
			}
			assert!(as_resp == second_as_resp);
			assert!(bs_resp == second_bs_resp);
		}

		(SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
	} else {
		let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events_4.len(), 2);
		(SendEvent::from_event(events_4.remove(0)), match events_4[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	if disconnect_count & !disconnect_flags > 2 {
		let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

		assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
		assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

		assert!(as_resp.2.is_none());
		assert!(bs_resp.2.is_none());
	}

	let as_commitment_update;
	let bs_second_commitment_update;

	macro_rules! handle_bs_raa { () => {
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
		as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		assert!(as_commitment_update.update_add_htlcs.is_empty());
		assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(as_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[0], 1);
	} }

	macro_rules! handle_initial_raa { () => {
		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
		bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[1], 1);
	} }

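	// Bit 8 swaps which revoke_and_ack we deliver first, exercising both RAACommitmentOrder
	// paths through the channel state machine.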
	if (disconnect_count & 8) == 0 {
		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.is_none());

			assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	} else {
		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

			assert!(as_resp.2.is_none());
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);

			assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	}

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_temporary_update_fail_a() {
	do_test_monitor_temporary_update_fail(0);
	do_test_monitor_temporary_update_fail(1);
	do_test_monitor_temporary_update_fail(2);
	do_test_monitor_temporary_update_fail(3);
	do_test_monitor_temporary_update_fail(4);
	do_test_monitor_temporary_update_fail(5);
}

#[test]
fn test_monitor_temporary_update_fail_b() {
	do_test_monitor_temporary_update_fail(2 | 8);
	do_test_monitor_temporary_update_fail(3 | 8);
	do_test_monitor_temporary_update_fail(4 | 8);
	do_test_monitor_temporary_update_fail(5 | 8);
}

#[test]
fn test_monitor_temporary_update_fail_c() {
	do_test_monitor_temporary_update_fail(1 | 16);
	do_test_monitor_temporary_update_fail(2 | 16);
	do_test_monitor_temporary_update_fail(3 | 16);
	do_test_monitor_temporary_update_fail(2 | 8 | 16);
	do_test_monitor_temporary_update_fail(3 | 8 | 16);
}

#[test]
fn test_monitor_update_fail_cs() {
	// Tests handling of a monitor update failure when processing an incoming commitment_signed
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
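	// Completing the pending update lets nodes[1] release both the revoke_and_ack and the
	// commitment update it had been holding back while frozen, hence the two responses below.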
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let responses = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(responses.len(), 2);

	match responses[0] {
		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}
	match responses[1] {
		MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(nodes[0], 1);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		},
		_ => panic!("Unexpected event"),
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}

#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
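	// The trailing flags make the dance return the final revoke_and_ack rather than delivering
	// it (flag semantics per functional_test_utils), so we can feed it to nodes[1] below while
	// its monitor updating is failing.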

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	send_payment(&nodes[0], &[&nodes[1]], 5000000);
	let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
	{
		nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
			RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[1], 1);
	}
	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
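	// TestPersister queues return statuses, so two InProgress pushes cover the two monitor
	// updates that follow: one from handling the commitment_signed and one from the RAA
	// (queueing behavior per the test utils).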
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}

fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
	// Tests handling of a monitor update failure when processing an incoming RAA
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Rebalance a bit so that we can send backwards from 2 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	// Route a first payment that we'll fail backwards
	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
	nodes[2].node.fail_htlc_backwards(&payment_hash_1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
	check_added_monitors!(nodes[2], 1);

	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);

	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	check_added_monitors!(nodes[0], 0);

	// While the second channel is AwaitingRAA, forward a second payment to get it into the
	// holding cell.
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now fail monitor updating.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Forward a third payment which will also be added to the holding cell, despite the channel
	// being paused waiting on a monitor update.
892         let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
893         {
894                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
895                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
896                 check_added_monitors!(nodes[0], 1);
897         }
898
899         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
900         send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
901         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
902         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
903         check_added_monitors!(nodes[1], 0);
904
905         // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
906         // and not forwarded.
907         expect_pending_htlcs_forwardable!(nodes[1]);
908         check_added_monitors!(nodes[1], 0);
909         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
910
911         let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
912                 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
913                 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
914                 nodes[2].node.send_payment_with_route(&route, payment_hash_4,
915                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
916                 check_added_monitors!(nodes[2], 1);
917
918                 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
919                 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
920                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
921                 check_added_monitors!(nodes[1], 1);
922                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
923                 (Some(payment_preimage_4), Some(payment_hash_4))
924         } else { (None, None) };
925
926         // Restore monitor updating, ensuring we immediately get a fail-back update and an
927         // update_add update.
928         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
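        // Completing a stalled update is a two-step process: new updates are set to return
        // Completed, then force_channel_monitor_updated tells the ChannelManager that the held
        // update (identified by funding outpoint and update_id) has been persisted, releasing the
        // buffered messages and events.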
929         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
930         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
931         check_added_monitors!(nodes[1], 0);
932         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
933         check_added_monitors!(nodes[1], 1);
934
935         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
936         if test_ignore_second_cs {
937                 assert_eq!(events_3.len(), 3);
938         } else {
939                 assert_eq!(events_3.len(), 2);
940         }
941
942         // Note that the ordering of the events for different nodes is non-prescriptive, though the
943         // two events that both go to nodes[2] have to stay in the same relative order.
944         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
945         let messages_a = match nodes_0_event {
946                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
947                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
948                         assert!(updates.update_fulfill_htlcs.is_empty());
949                         assert_eq!(updates.update_fail_htlcs.len(), 1);
950                         assert!(updates.update_fail_malformed_htlcs.is_empty());
951                         assert!(updates.update_add_htlcs.is_empty());
952                         assert!(updates.update_fee.is_none());
953                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
954                 },
955                 _ => panic!("Unexpected event type!"),
956         };
957
958         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
959         let send_event_b = SendEvent::from_event(nodes_2_event);
960         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
961
962         let raa = if test_ignore_second_cs {
963                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
964                 match nodes_2_event {
965                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
966                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
967                                 Some(msg.clone())
968                         },
969                         _ => panic!("Unexpected event"),
970                 }
971         } else { None };
972
973         // Now deliver the new messages...
974
975         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
976         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
977         expect_payment_failed!(nodes[0], payment_hash_1, true);
978
979         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
980         let as_cs;
981         if test_ignore_second_cs {
982                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
983                 check_added_monitors!(nodes[2], 1);
984                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
985                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
986                 check_added_monitors!(nodes[2], 1);
987                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
988                 assert!(bs_cs.update_add_htlcs.is_empty());
989                 assert!(bs_cs.update_fail_htlcs.is_empty());
990                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
991                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
992                 assert!(bs_cs.update_fee.is_none());
993
994                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
995                 check_added_monitors!(nodes[1], 1);
996                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
997
998                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
999                 check_added_monitors!(nodes[1], 1);
1000         } else {
1001                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1002                 check_added_monitors!(nodes[2], 1);
1003
1004                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1005                 // As both messages are for nodes[1], they're in order.
1006                 assert_eq!(bs_revoke_and_commit.len(), 2);
1007                 match bs_revoke_and_commit[0] {
1008                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1009                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1010                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1011                                 check_added_monitors!(nodes[1], 1);
1012                         },
1013                         _ => panic!("Unexpected event"),
1014                 }
1015
1016                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1017
1018                 match bs_revoke_and_commit[1] {
1019                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1020                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1021                                 assert!(updates.update_add_htlcs.is_empty());
1022                                 assert!(updates.update_fail_htlcs.is_empty());
1023                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1024                                 assert!(updates.update_fulfill_htlcs.is_empty());
1025                                 assert!(updates.update_fee.is_none());
1026                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1027                                 check_added_monitors!(nodes[1], 1);
1028                         },
1029                         _ => panic!("Unexpected event"),
1030                 }
1031         }
1032
1033         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1034         assert!(as_cs.update_fail_htlcs.is_empty());
1035         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1036         assert!(as_cs.update_fulfill_htlcs.is_empty());
1037         assert!(as_cs.update_fee.is_none());
1038         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1039
1040
1041         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1042         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1043         check_added_monitors!(nodes[2], 1);
1044         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1045
1046         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1047         check_added_monitors!(nodes[2], 1);
1048         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1049
1050         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1051         check_added_monitors!(nodes[1], 1);
1052         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1053
1054         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1055         check_added_monitors!(nodes[1], 1);
1056         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1057
1058         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1059         check_added_monitors!(nodes[2], 1);
1060         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1061
1062         expect_pending_htlcs_forwardable!(nodes[2]);
1063
1064         let events_6 = nodes[2].node.get_and_clear_pending_events();
1065         assert_eq!(events_6.len(), 2);
1066         match events_6[0] {
1067                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1068                 _ => panic!("Unexpected event"),
1069         };
1070         match events_6[1] {
1071                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1072                 _ => panic!("Unexpected event"),
1073         };
1074
1075         if test_ignore_second_cs {
1076                 expect_pending_htlcs_forwardable!(nodes[1]);
1077                 check_added_monitors!(nodes[1], 1);
1078
1079                 send_event = SendEvent::from_node(&nodes[1]);
1080                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1081                 assert_eq!(send_event.msgs.len(), 1);
1082                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1083                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1084
1085                 expect_pending_htlcs_forwardable!(nodes[0]);
1086
1087                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1088                 assert_eq!(events_9.len(), 1);
1089                 match events_9[0] {
1090                         Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1091                         _ => panic!("Unexpected event"),
1092                 };
1093                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1094         }
1095
1096         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1097 }
1098
1099 #[test]
1100 fn test_monitor_update_fail_raa() {
1101         do_test_monitor_update_fail_raa(false);
1102         do_test_monitor_update_fail_raa(true);
1103 }
1104
1105 #[test]
1106 fn test_monitor_update_fail_reestablish() {
1107         // Simple test for message retransmission after a monitor update failure, with the
1108         // channel_reestablish generating a monitor update (which comes from freeing holding-cell
1109         // HTLCs).
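        // As a rough sketch, the monitor-update-failure tests in this file all follow the same
        // pattern (`n` and `channel_id` here are placeholders):
        //
        //     chanmon_cfgs[n].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        //     // ...poke the node; the resulting monitor update is now held in-flight...
        //     chanmon_cfgs[n].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        //     let (outpoint, latest_update, _) = nodes[n].chain_monitor.latest_monitor_update_id
        //         .lock().unwrap().get(&channel_id).unwrap().clone();
        //     nodes[n].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        //
        // after which everything the node buffered while the update was in-flight is released.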
1110         let chanmon_cfgs = create_chanmon_cfgs(3);
1111         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1112         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1113         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1114         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1115         create_announced_chan_between_nodes(&nodes, 1, 2);
1116
1117         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
1118
1119         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1120         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1121
1122         nodes[2].node.claim_funds(payment_preimage);
1123         check_added_monitors!(nodes[2], 1);
1124         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
1125
1126         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1127         assert!(updates.update_add_htlcs.is_empty());
1128         assert!(updates.update_fail_htlcs.is_empty());
1129         assert!(updates.update_fail_malformed_htlcs.is_empty());
1130         assert!(updates.update_fee.is_none());
1131         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1132         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1133         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
1134         check_added_monitors!(nodes[1], 1);
1135         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1136         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1137
1138         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
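        // With the persister returning InProgress, the monitor update generated when nodes[1]
        // frees its holding-cell fulfill (below) is held in-flight, so nodes[1] stays silent
        // towards nodes[0] for now.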
1139         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
1140         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
1141
1142         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1143         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1144
1145         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1146
1147         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1148         assert_eq!(
1149                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1150                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1151
1152         nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
1153         check_added_monitors!(nodes[1], 1);
1154
1155         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1156         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1157
1158         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
1159         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
1160
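        // Since no commitment state advanced while the update was pending, the second
        // reconnection must retransmit channel_reestablish messages identical to the first ones.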
1161         assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
1162         assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1163
1164         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1165         assert_eq!(
1166                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1167                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1168
1169         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1170         check_added_monitors!(nodes[1], 0);
1171         assert_eq!(
1172                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
1173                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1174
1175         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1176         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1177         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1178         check_added_monitors!(nodes[1], 0);
1179
1180         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1181         assert!(updates.update_add_htlcs.is_empty());
1182         assert!(updates.update_fail_htlcs.is_empty());
1183         assert!(updates.update_fail_malformed_htlcs.is_empty());
1184         assert!(updates.update_fee.is_none());
1185         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1186         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1187         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1188         expect_payment_sent!(nodes[0], payment_preimage);
1189 }
1190
1191 #[test]
1192 fn raa_no_response_awaiting_raa_state() {
1193         // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1194         // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1195         // in question (assuming it intends to respond with a CS after monitor updating is restored).
1196         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1197         let chanmon_cfgs = create_chanmon_cfgs(2);
1198         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1199         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1200         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1201         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1202
1203         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1204         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1205         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1206
1207         // Queue up two payments - one will be delivered right away, one immediately goes into the
1208         // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1209         // immediately after a CS. By failing the monitor update from the CS (which requires only an
1210         // RAA response due to AwaitingRAA) we can deliver the RAA and require the CS generation
1211         // during RAA while in monitor-update-failed state.
1212         {
1213                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1214                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1215                 check_added_monitors!(nodes[0], 1);
1216                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1217                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1218                 check_added_monitors!(nodes[0], 0);
1219         }
1220
1221         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1222         assert_eq!(events.len(), 1);
1223         let payment_event = SendEvent::from_event(events.pop().unwrap());
1224         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1225         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1226         check_added_monitors!(nodes[1], 1);
1227
1228         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1229         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1230         check_added_monitors!(nodes[0], 1);
1231         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1232         assert_eq!(events.len(), 1);
1233         let payment_event = SendEvent::from_event(events.pop().unwrap());
1234
1235         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1236         check_added_monitors!(nodes[0], 1);
1237         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1238
1239         // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1240         // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1241         // then restore channel monitor updates.
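        // The test persister queues one return value per set_update_ret call, so the two calls
        // below should cover both the update from the commitment_signed and the update from the
        // RAA that follows it.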
1242         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1243         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1244         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1245         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1246         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1247         check_added_monitors!(nodes[1], 1);
1248         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1249
1250         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1251         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1252         check_added_monitors!(nodes[1], 1);
1253
1254         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1255         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1256         // nodes[1] should be AwaitingRAA here!
1257         check_added_monitors!(nodes[1], 0);
1258         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1259         expect_pending_htlcs_forwardable!(nodes[1]);
1260         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1261
1262         // We send a third payment here, which is somewhat redundant, but the
1263         // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1264         // commitment transaction states) whereas here we can explicitly check for it.
1265         {
1266                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
1267                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1268                 check_added_monitors!(nodes[0], 0);
1269                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1270         }
1271         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1272         check_added_monitors!(nodes[0], 1);
1273         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1274         assert_eq!(events.len(), 1);
1275         let payment_event = SendEvent::from_event(events.pop().unwrap());
1276
1277         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1278         check_added_monitors!(nodes[0], 1);
1279         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1280
1281         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1282         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1283         check_added_monitors!(nodes[1], 1);
1284         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1285
1286         // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1287         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1288         check_added_monitors!(nodes[1], 1);
1289         expect_pending_htlcs_forwardable!(nodes[1]);
1290         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1291         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1292
1293         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1294         check_added_monitors!(nodes[0], 1);
1295
1296         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1297         check_added_monitors!(nodes[0], 1);
1298         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1299
1300         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1301         check_added_monitors!(nodes[1], 1);
1302         expect_pending_htlcs_forwardable!(nodes[1]);
1303         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1304
1305         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1306         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1307         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
1308 }
1309
1310 #[test]
1311 fn claim_while_disconnected_monitor_update_fail() {
1312         // Test for claiming a payment while disconnected and then having the resulting
1313         // channel-update-generated monitor update fail. This isn't a particularly contrived case
1314         // for nodes with unstable network connectivity.
1315         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1316         // code introduced a regression in this test (specifically, this caught a removal of the
1317         // channel_reestablish handling ensuring the order was sensible given the messages used).
1318         let chanmon_cfgs = create_chanmon_cfgs(2);
1319         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1320         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1321         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1322         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1323
1324         // Forward a payment for B to claim
1325         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1326
1327         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1328         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1329
1330         nodes[1].node.claim_funds(payment_preimage_1);
1331         check_added_monitors!(nodes[1], 1);
1332         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1333
1334         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
1335         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
1336
1337         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1338         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1339
1340         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1341         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1342
1343         // Now deliver A's reestablish, freeing the claim from the holding cell, but fail the monitor
1344         // update.
1345         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1346
1347         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1348         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1349         check_added_monitors!(nodes[1], 1);
1350         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1351
1352         // Send a second payment from A to B, resulting in a commitment update that gets swallowed
1353         // while the monitor update is still failed.
1354         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1355         {
1356                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1357                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1358                 check_added_monitors!(nodes[0], 1);
1359         }
1360
1361         let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1362         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1363         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1364         check_added_monitors!(nodes[1], 1);
1365         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1366         // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
1367         // HTLC until we've channel_monitor_update'd and updated for the new commitment transaction.
1368
1369         // Now un-fail the monitor, which will result in B sending its original commitment update,
1370         // receiving the commitment update from A, and the resulting commitment dances.
1371         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1372         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1373         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1374         check_added_monitors!(nodes[1], 0);
1375
1376         let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1377         assert_eq!(bs_msgs.len(), 2);
1378
1379         match bs_msgs[0] {
1380                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1381                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1382                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1383                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1384                         check_added_monitors!(nodes[0], 1);
1385
1386                         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1387                         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1388                         check_added_monitors!(nodes[1], 1);
1389                 },
1390                 _ => panic!("Unexpected event"),
1391         }
1392
1393         match bs_msgs[1] {
1394                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1395                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1396                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1397                         check_added_monitors!(nodes[0], 1);
1398                 },
1399                 _ => panic!("Unexpected event"),
1400         }
1401
1402         let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1403
1404         let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1405         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1406         check_added_monitors!(nodes[0], 1);
1407         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1408
1409         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1410         check_added_monitors!(nodes[1], 1);
1411         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1412         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1413         check_added_monitors!(nodes[1], 1);
1414
1415         expect_pending_htlcs_forwardable!(nodes[1]);
1416         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1417
1418         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1419         check_added_monitors!(nodes[0], 1);
1420         expect_payment_sent!(nodes[0], payment_preimage_1);
1421
1422         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1423 }
1424
1425 #[test]
1426 fn monitor_failed_no_reestablish_response() {
1427         // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1428         // response to a commitment_signed.
1429         // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1430         // debug_assert!() failure in channel_reestablish handling.
1431         let chanmon_cfgs = create_chanmon_cfgs(2);
1432         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1433         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1434         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1435         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
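        // Set announcement_sigs_state to PeerReceived on both sides so that neither node
        // retransmits announcement_signatures on reconnect, keeping the message flow below limited
        // to what the test actually checks.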
1436         {
1437                 let mut node_0_per_peer_lock;
1438                 let mut node_0_peer_state_lock;
1439                 get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1440         }
1441         {
1442                 let mut node_1_per_peer_lock;
1443                 let mut node_1_peer_state_lock;
1444                 get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1445         }
1446
1447         // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1448         // on receipt).
1449         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1450         {
1451                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1452                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1453                 check_added_monitors!(nodes[0], 1);
1454         }
1455
1456         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1457         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1458         assert_eq!(events.len(), 1);
1459         let payment_event = SendEvent::from_event(events.pop().unwrap());
1460         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1461         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1462         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1463         check_added_monitors!(nodes[1], 1);
1464
1465         // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1466         // is still failing to update monitors.
1467         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1468         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1469
1470         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
1471         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
1472
1473         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1474         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1475
1476         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1477         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1478         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1479         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1480
1481         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1482         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1483         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1484         check_added_monitors!(nodes[1], 0);
1485         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1486
1487         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1488         check_added_monitors!(nodes[0], 1);
1489         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1490         check_added_monitors!(nodes[0], 1);
1491
1492         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1493         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1494         check_added_monitors!(nodes[1], 1);
1495
1496         expect_pending_htlcs_forwardable!(nodes[1]);
1497         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1498
1499         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1500 }
1501
1502 #[test]
1503 fn first_message_on_recv_ordering() {
1504         // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1505         // messages, we're willing to flip the order of response messages if necessary in response to
1506         // a commitment_signed which needs to send an RAA first.
1507         // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1508         // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1509         // response. To do this, we start routing two payments, with the final RAA for the first being
1510         // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1511         // have no pending response but will want to send an RAA/CS (with the updates for the second
1512         // payment applied).
1513         // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1514         let chanmon_cfgs = create_chanmon_cfgs(2);
1515         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1516         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1517         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1518         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1519
1520         // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1521         // can deliver it and fail the monitor update.
1522         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1523         {
1524                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1525                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1526                 check_added_monitors!(nodes[0], 1);
1527         }
1528
1529         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1530         assert_eq!(events.len(), 1);
1531         let payment_event = SendEvent::from_event(events.pop().unwrap());
1532         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1533         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1534         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1535         check_added_monitors!(nodes[1], 1);
1536         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1537
1538         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1539         check_added_monitors!(nodes[0], 1);
1540         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1541         check_added_monitors!(nodes[0], 1);
1542
1543         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1544
1545         // Route the second payment, generating an update_add_htlc/commitment_signed
1546         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1547         {
1548                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1549                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1550                 check_added_monitors!(nodes[0], 1);
1551         }
1552         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1553         assert_eq!(events.len(), 1);
1554         let payment_event = SendEvent::from_event(events.pop().unwrap());
1555         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1556
1557         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1558
1559         // Deliver the final RAA for the first payment, which does not require a response. RAAs
1560         // generally require a commitment_signed in response, so the fact that the response to the
1561         // next message is the opposite (an RAA first) also tests resetting the delivery order.
1562         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1563         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1564         check_added_monitors!(nodes[1], 1);
1565
1566         // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1567         // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1568         // appropriate HTLC acceptance).
1569         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1570         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1571         check_added_monitors!(nodes[1], 1);
1572         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1573
1574         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1575         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1576         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1577         check_added_monitors!(nodes[1], 0);
1578
1579         expect_pending_htlcs_forwardable!(nodes[1]);
1580         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1581
1582         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1583         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1584         check_added_monitors!(nodes[0], 1);
1585         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1586         check_added_monitors!(nodes[0], 1);
1587
1588         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1589         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1590         check_added_monitors!(nodes[1], 1);
1591
1592         expect_pending_htlcs_forwardable!(nodes[1]);
1593         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1594
1595         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1596         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1597 }
1598
1599 #[test]
1600 fn test_monitor_update_fail_claim() {
1601         // Basic test for monitor update failures when processing claim_funds calls.
1602         // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1603         // update to claim the payment. We then send two payments C->B->A, which are held at B.
1604         // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1605         // the payments from C onwards to A.
1606         let chanmon_cfgs = create_chanmon_cfgs(3);
1607         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1608         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1609         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1610         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1611         create_announced_chan_between_nodes(&nodes, 1, 2);
1612
1613         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
1614         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1615
1616         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1617
1618         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1619         nodes[1].node.claim_funds(payment_preimage_1);
1620         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1621         check_added_monitors!(nodes[1], 1);
1622
1623         // Note that at this point there is a pending commitment transaction update for A being held by
1624         // B. Even when we go to send the payment from C through B to A, B will not update this
1625         // already-signed commitment transaction and will instead wait for it to resolve before
1626         // forwarding the payment onwards.
1627
1628         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1629         {
1630                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1631                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1632                 check_added_monitors!(nodes[2], 1);
1633         }
1634
1635         // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1636         // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1637         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1638
1639         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1640         assert_eq!(events.len(), 1);
1641         let payment_event = SendEvent::from_event(events.pop().unwrap());
1642         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1643         let events = nodes[1].node.get_and_clear_pending_msg_events();
1644         assert_eq!(events.len(), 0);
1645         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1646         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
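        // The _ignore variant consumes the PendingHTLCsForwardable event without processing the
        // forwards, so the second payment below gets batched into the same forwarding pass.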
1647
1648         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1649         nodes[2].node.send_payment_with_route(&route, payment_hash_3,
1650                 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1651         check_added_monitors!(nodes[2], 1);
1652
1653         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1654         assert_eq!(events.len(), 1);
1655         let payment_event = SendEvent::from_event(events.pop().unwrap());
1656         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1657         let events = nodes[1].node.get_and_clear_pending_msg_events();
1658         assert_eq!(events.len(), 0);
1659         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1660
1661         // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1662         let channel_id = chan_1.2;
1663         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1664         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1665         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1666         check_added_monitors!(nodes[1], 0);
1667
1668         let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1669         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1670         commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1671         expect_payment_sent!(nodes[0], payment_preimage_1);
1672
1673         // Process the payment forwards; note that both were batched into one commitment update.
1674         nodes[1].node.process_pending_htlc_forwards();
1675         check_added_monitors!(nodes[1], 1);
1676         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1677         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1678         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1679         commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1680         expect_pending_htlcs_forwardable!(nodes[0]);
1681
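        // Both HTLCs surface as PaymentClaimable on nodes[0]; via_user_channel_id is 42 because
        // the test harness opens its channels with that user_channel_id.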
1682         let events = nodes[0].node.get_and_clear_pending_events();
1683         assert_eq!(events.len(), 2);
1684         match events[0] {
1685                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
1686                         assert_eq!(payment_hash_2, *payment_hash);
1687                         assert_eq!(1_000_000, amount_msat);
1688                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1689                         assert_eq!(via_channel_id, Some(channel_id));
1690                         assert_eq!(via_user_channel_id, Some(42));
1691                         match &purpose {
1692                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1693                                         assert!(payment_preimage.is_none());
1694                                         assert_eq!(payment_secret_2, *payment_secret);
1695                                 },
1696                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1697                         }
1698                 },
1699                 _ => panic!("Unexpected event"),
1700         }
1701         match events[1] {
1702                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
1703                         assert_eq!(payment_hash_3, *payment_hash);
1704                         assert_eq!(1_000_000, amount_msat);
1705                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1706                         assert_eq!(via_channel_id, Some(channel_id));
1707                         match &purpose {
1708                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1709                                         assert!(payment_preimage.is_none());
1710                                         assert_eq!(payment_secret_3, *payment_secret);
1711                                 },
1712                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1713                         }
1714                 },
1715                 _ => panic!("Unexpected event"),
1716         }
1717 }
1718
1719 #[test]
1720 fn test_monitor_update_on_pending_forwards() {
1721         // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1722         // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1723         // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1724         // from C to A will be pending a forward to A.
1725         let chanmon_cfgs = create_chanmon_cfgs(3);
1726         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1727         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1728         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1729         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1730         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1731
1732         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
1733         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1734
1735         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1736         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1737         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1738         check_added_monitors!(nodes[2], 1);
1739
1740         let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1741         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1742         commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1743         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1744
1745         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1746         {
1747                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1748                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1749                 check_added_monitors!(nodes[2], 1);
1750         }
1751
1752         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1753         assert_eq!(events.len(), 1);
1754         let payment_event = SendEvent::from_event(events.pop().unwrap());
1755         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1756         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
1757
1758         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1759         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1760         check_added_monitors!(nodes[1], 1);
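        // Both the back-fail of the first payment and the forward of the second target chan_1, so
        // they are batched into the single in-flight monitor update and will be released together
        // as one commitment update below.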
1761
1762         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1763         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1764         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1765         check_added_monitors!(nodes[1], 0);
1766
1767         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1768         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1769         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1770         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1771
1772         let events = nodes[0].node.get_and_clear_pending_events();
1773         assert_eq!(events.len(), 3);
1774         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
1775                 assert_eq!(payment_hash, payment_hash_1);
1776                 assert!(payment_failed_permanently);
1777         } else { panic!("Unexpected event!"); }
1778         match events[2] {
1779                 Event::PaymentFailed { payment_hash, .. } => {
1780                         assert_eq!(payment_hash, payment_hash_1);
1781                 },
1782                 _ => panic!("Unexpected event"),
1783         }
1784         match events[0] {
1785                 Event::PendingHTLCsForwardable { .. } => { },
1786                 _ => panic!("Unexpected event"),
1787         };
1788         nodes[0].node.process_pending_htlc_forwards();
1789         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1790
1791         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1792 }
1793
1794 #[test]
1795 fn monitor_update_claim_fail_no_response() {
1796         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1797         // to channel being AwaitingRAA).
1798         // Backported from the chanmon_fail_consistency fuzz tests, as an unmerged version of the handling
1799         // code was broken.
1800         let chanmon_cfgs = create_chanmon_cfgs(2);
1801         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1802         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1803         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1804         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1805
1806         // Forward a payment for B to claim
1807         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1808
1809         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1810         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1811         {
1812                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1813                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1814                 check_added_monitors!(nodes[0], 1);
1815         }
1816
1817         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1818         assert_eq!(events.len(), 1);
1819         let payment_event = SendEvent::from_event(events.pop().unwrap());
1820         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1821         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
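	// The extra boolean arguments stop the dance one step short, handing us nodes[0]'s final
	// revoke_and_ack (as_raa) instead of delivering it, which leaves nodes[1] in AwaitingRAA.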
1822
1823         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1824         nodes[1].node.claim_funds(payment_preimage_1);
1825         check_added_monitors!(nodes[1], 1);
1826
1827         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1828
1829         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1830         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1831         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1832         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1833         check_added_monitors!(nodes[1], 0);
1834         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1835
1836         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1837         check_added_monitors!(nodes[1], 1);
1838         expect_pending_htlcs_forwardable!(nodes[1]);
1839         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1840
1841         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1842         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1843         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1844         expect_payment_sent!(nodes[0], payment_preimage_1);
1845
1846         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1847 }
1848
1849 // restore_b_before_conf has no meaning if !confirm_a_first
1850 // restore_b_before_lock has no meaning if confirm_a_first
1851 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1852         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1853         // the channel setup happily after the update is restored.
1854         let chanmon_cfgs = create_chanmon_cfgs(2);
1855         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1856         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1857         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1858
1859         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1860         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1861         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1862
1863         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1864
1865         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1866         check_added_monitors!(nodes[0], 0);
1867
1868         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1869         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
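	// Compute the channel_id ourselves: per BOLT 2, a (v1) channel_id is the funding txid with
	// the funding output index XORed into its last two bytes, which to_channel_id() implements.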
1870         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1871         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1872         check_added_monitors!(nodes[1], 1);
1873
1874         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1875         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1876         check_added_monitors!(nodes[0], 1);
1877         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1878         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1879         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1880         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1881         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1882         check_added_monitors!(nodes[0], 0);
1883         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1884
1885         let events = nodes[0].node.get_and_clear_pending_events();
1886         assert_eq!(events.len(), 0);
1887         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1888         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1889
1890         if confirm_a_first {
1891                 confirm_transaction(&nodes[0], &funding_tx);
1892                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1893                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1894                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1895         } else {
1896                 assert!(!restore_b_before_conf);
1897                 confirm_transaction(&nodes[1], &funding_tx);
1898                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1899         }
1900
1901         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1902         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1903         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
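	// (reconnect_nodes' positional pairs encode what each side is expected to re-send on
	// reconnect; here at most a channel_ready re-send (when A confirmed first) and no HTLC
	// adds, claims, fails, or RAA retransmissions.)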
1904         reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1905
1906         // But we want to re-emit ChannelPending
1907         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1908         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1909         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1910
1911         if !restore_b_before_conf {
1912                 confirm_transaction(&nodes[1], &funding_tx);
1913                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1914                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1915         }
1916         if !confirm_a_first && !restore_b_before_lock {
1917                 confirm_transaction(&nodes[0], &funding_tx);
1918                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1919                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1920                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1921         }
1922
1923         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1924         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1925         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1926         check_added_monitors!(nodes[1], 0);
1927
1928         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1929                 if !restore_b_before_lock {
1930                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1931                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1932                 } else {
1933                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1934                         confirm_transaction(&nodes[0], &funding_tx);
1935                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1936                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1937                 }
1938         } else {
1939                 if restore_b_before_conf {
1940                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1941                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1942                         confirm_transaction(&nodes[1], &funding_tx);
1943                 }
1944                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1945                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1946         };
1947         for node in nodes.iter() {
1948                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1949                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1950                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1951         }
1952
1953         if !restore_b_before_lock {
1954                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1955         } else {
1956                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1957         }
1958
1959
1960         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1961         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1962         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1963         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1964 }
1965
1966 #[test]
1967 fn during_funding_monitor_fail() {
1968         do_during_funding_monitor_fail(true, true, false);
1969         do_during_funding_monitor_fail(true, false, false);
1970         do_during_funding_monitor_fail(false, false, false);
1971         do_during_funding_monitor_fail(false, false, true);
1972 }
1973
1974 #[test]
1975 fn test_path_paused_mpp() {
1976         // Simple test of sending a multi-part payment where one path is currently blocked awaiting
1977         // a monitor update.
1978         let chanmon_cfgs = create_chanmon_cfgs(4);
1979         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
1980         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
1981         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
1982
1983         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
1984         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
1985         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
1986         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
1987
1988         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
1989
1990         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
1991         let path = route.paths[0].clone();
1992         route.paths.push(path);
1993         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
1994         route.paths[0].hops[0].short_channel_id = chan_1_id;
1995         route.paths[0].hops[1].short_channel_id = chan_3_id;
1996         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
1997         route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
1998         route.paths[1].hops[1].short_channel_id = chan_4_id;
1999
2000         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
2001         // (for the path 0 -> 2 -> 3) fails.
2002         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2003         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2004
2005         // Now check that we get the right return value, indicating that the first path succeeded but
2006         // the second got a MonitorUpdateInProgress err. This implies
2007         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
2008         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
2009                 &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
2010         ) {
2011                 assert_eq!(results.len(), 2);
2012                 if let Ok(()) = results[0] {} else { panic!(); }
2013                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2014         } else { panic!(); }
2015         check_added_monitors!(nodes[0], 2);
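	// Both paths attempted a monitor update (hence the count of 2) even though the second
	// returned InProgress; that path's HTLC now sits paused until its update completes.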
2016         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2017
2018         // Pass the first HTLC of the payment along to nodes[3].
2019         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2020         assert_eq!(events.len(), 1);
2021         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2022
2023         // And check that, after we successfully update the monitor for chan_2 we can pass the second
2024         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2025         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2026         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2027         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2028         assert_eq!(events.len(), 1);
2029         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2030
2031         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
2032 }
2033
2034 #[test]
2035 fn test_pending_update_fee_ack_on_reconnect() {
2036         // In early versions of our automated fee update patch, nodes did not correctly use the
2037         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2038         // undelivered commitment_signed.
2039         //
2040         // B sends A new HTLC + CS, not delivered
2041         // A sends B update_fee + CS
2042         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2043         // reconnect
2044         // B resends initial CS, using the original fee
2045
2046         let chanmon_cfgs = create_chanmon_cfgs(2);
2047         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2048         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2049         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2050
2051         create_announced_chan_between_nodes(&nodes, 0, 1);
2052         send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
2053
2054         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2055         nodes[1].node.send_payment_with_route(&route, payment_hash,
2056                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2057         check_added_monitors!(nodes[1], 1);
2058         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2059         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2060
2061         {
2062                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2063                 *feerate_lock *= 2;
2064         }
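	// Doubling nodes[0]'s feerate estimate should comfortably exceed the internal update
	// threshold, so the next timer tick queues an update_fee + commitment_signed for nodes[1].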
2065         nodes[0].node.timer_tick_occurred();
2066         check_added_monitors!(nodes[0], 1);
2067         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2068         assert!(as_update_fee_msgs.update_fee.is_some());
2069
2070         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2071         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2072         check_added_monitors!(nodes[1], 1);
2073         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2074         // bs_first_raa is not delivered until it is re-generated after reconnect
2075
2076         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2077         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2078
2079         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
2080         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2081         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
2082         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2083
2084         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2085         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2086         assert_eq!(bs_resend_msgs.len(), 3);
2087         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2088                 assert_eq!(*updates, bs_initial_send_msgs);
2089         } else { panic!(); }
2090         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2091                 assert_eq!(*msg, bs_first_raa);
2092         } else { panic!(); }
2093         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
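	// The key check is the first assertion above: nodes[1]'s re-sent commitment_signed must
	// match the original exactly, i.e. still be built on the old feerate rather than the
	// not-yet-committed new one.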
2094
2095         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2096         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2097
2098         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2099         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2100         check_added_monitors!(nodes[0], 1);
2101         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2102         check_added_monitors!(nodes[1], 1);
2103         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2104
2105         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2106         check_added_monitors!(nodes[0], 1);
2107         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2108         check_added_monitors!(nodes[1], 1);
2109         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2110
2111         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2112         check_added_monitors!(nodes[0], 1);
2113         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2114         check_added_monitors!(nodes[0], 1);
2115
2116         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2117         check_added_monitors!(nodes[1], 1);
2118
2119         expect_pending_htlcs_forwardable!(nodes[0]);
2120         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2121
2122         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2123 }
2124
2125 #[test]
2126 fn test_fail_htlc_on_broadcast_after_claim() {
2127         // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2128         // channel backwards if we received an HTLC failure after an HTLC fulfillment. Here we test a
2129         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2130         // HTLC was not included in a confirmed commitment transaction.
2131         //
2132         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2133         // channel immediately before commitment occurs. After the commitment transaction reaches
2134         // ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
2135         let chanmon_cfgs = create_chanmon_cfgs(3);
2136         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2137         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2138         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2139
2140         create_announced_chan_between_nodes(&nodes, 0, 1);
2141         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2142
2143         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2144
2145         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2146         assert_eq!(bs_txn.len(), 1);
2147
2148         nodes[2].node.claim_funds(payment_preimage);
2149         check_added_monitors!(nodes[2], 1);
2150         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2151
2152         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2153         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2154         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2155         check_added_monitors!(nodes[1], 1);
2156         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2157
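	// Confirm nodes[2]'s commitment transaction on nodes[1]'s chain before the fulfill is
	// committed. The dust HTLC has no output in it, so after ANTI_REORG_DELAY confirmations the
	// monitor fails it backwards even though we already relayed the preimage upstream.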
2158         mine_transaction(&nodes[1], &bs_txn[0]);
2159         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2160         check_closed_broadcast!(nodes[1], true);
2161         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2162         check_added_monitors!(nodes[1], 1);
2163         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2164
2165         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2166         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2167         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2168         expect_payment_path_successful!(nodes[0]);
2169 }
2170
2171 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2172         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2173         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2174         // explicitly here.
2175         let chanmon_cfgs = create_chanmon_cfgs(2);
2176         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2177         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2178         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2179
2180         create_announced_chan_between_nodes(&nodes, 0, 1);
2181         send_payment(&nodes[0], &[&nodes[1]], 1000);
2182
2183         {
2184                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2185                 *feerate_lock += 20;
2186         }
2187         nodes[0].node.timer_tick_occurred();
2188         check_added_monitors!(nodes[0], 1);
2189         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2190         assert!(update_msgs.update_fee.is_some());
2191         if deliver_update {
2192                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2193         }
2194
2195         if parallel_updates {
2196                 {
2197                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2198                         *feerate_lock += 20;
2199                 }
2200                 nodes[0].node.timer_tick_occurred();
2201                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2202         }
2203
2204         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2205         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2206
2207         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
2208         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2209         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
2210         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2211
2212         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2213         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2214         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2215
2216         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2217         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2218         assert_eq!(as_reconnect_msgs.len(), 2);
2219         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2220         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2221                 { updates } else { panic!(); };
2222         assert!(update_msgs.update_fee.is_some());
2223         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2224         if parallel_updates {
2225                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2226                 check_added_monitors!(nodes[1], 1);
2227                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2228                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2229                 check_added_monitors!(nodes[0], 1);
2230                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2231
2232                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2233                 check_added_monitors!(nodes[0], 1);
2234                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2235
2236                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2237                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2238                 check_added_monitors!(nodes[1], 1);
2239                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2240
2241                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2242                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2243                 check_added_monitors!(nodes[1], 1);
2244
2245                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2246                 check_added_monitors!(nodes[0], 1);
2247
2248                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2249                 check_added_monitors!(nodes[0], 1);
2250                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2251
2252                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2253                 check_added_monitors!(nodes[1], 1);
2254         } else {
2255                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2256         }
2257
2258         send_payment(&nodes[0], &[&nodes[1]], 1000);
2259 }
2260 #[test]
2261 fn update_fee_resend_test() {
2262         do_update_fee_resend_test(false, false);
2263         do_update_fee_resend_test(true, false);
2264         do_update_fee_resend_test(false, true);
2265         do_update_fee_resend_test(true, true);
2266 }
2267
2268 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2269         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2270         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2271         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2272         // which failed in such a case).
2273         let chanmon_cfgs = create_chanmon_cfgs(2);
2274         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2275         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2276         let persister: test_utils::TestPersister;
2277         let new_chain_monitor: test_utils::TestChainMonitor;
2278         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2279         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2280
2281         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2282         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2283         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2284
2285         // Do a really complicated dance to get an HTLC into the holding cell, with
2286         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2287         // attempts to send an HTLC while MonitorUpdateInProgress is set are immediately
2288         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2289         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2290         // flags.
2291         //
2292         // We do this by:
2293         //  a) routing a payment from node B to node A,
2294         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2295         //     putting node A in AwaitingRemoteRevoke,
2296         //  c) sending a second payment from node A to node B, which is immediately placed in the
2297         //     holding cell,
2298         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2299         //     when we try to persist the payment preimage,
2300         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2301         //     clearing AwaitingRemoteRevoke on node A.
2302         //
2303         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2304         // (c) will not be freed from the holding cell.
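	// The code below follows that plan: (a) is the route_payment immediately following, (b) and
	// (c) are the two send_payment_with_route calls, (d) is the claim_funds, and (e) is the
	// commitment_signed/revoke_and_ack delivery after it.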
2305         let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2306
2307         nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2308                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2309         check_added_monitors!(nodes[0], 1);
2310         let send = SendEvent::from_node(&nodes[0]);
2311         assert_eq!(send.msgs.len(), 1);
2312
2313         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2314                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2315         check_added_monitors!(nodes[0], 0);
2316
2317         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2318         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
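	// Queue two InProgress returns: one for the claim_funds monitor update just below, and one
	// for the update generated when nodes[0] handles the revoke_and_ack afterwards.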
2319         nodes[0].node.claim_funds(payment_preimage_0);
2320         check_added_monitors!(nodes[0], 1);
2321
2322         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2323         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2324         check_added_monitors!(nodes[1], 1);
2325
2326         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2327
2328         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2329         check_added_monitors!(nodes[0], 1);
2330
2331         if disconnect {
2332                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2333                 // disconnect the peers. Note that the fuzzer originally found this issue because
2334                 // deserializing a ChannelManager in this state causes an assertion failure.
2335                 if reload_a {
2336                         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2337                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2338                 } else {
2339                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2340                 }
2341                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2342
2343                 // Now reconnect the two
2344                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
2345                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2346                 assert_eq!(reestablish_1.len(), 1);
2347                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
2348                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2349                 assert_eq!(reestablish_2.len(), 1);
2350
2351                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2352                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2353                 check_added_monitors!(nodes[1], 0);
2354
2355                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2356                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2357
2358                 assert!(resp_0.0.is_none());
2359                 assert!(resp_0.1.is_none());
2360                 assert!(resp_0.2.is_none());
2361                 assert!(resp_1.0.is_none());
2362                 assert!(resp_1.1.is_none());
2363
2364                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2365                 // moment).
2366                 if let Some(pending_cs) = resp_1.2 {
2367                         assert!(pending_cs.update_add_htlcs.is_empty());
2368                         assert!(pending_cs.update_fail_htlcs.is_empty());
2369                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2370                         assert_eq!(pending_cs.commitment_signed, cs);
2371                 } else { panic!(); }
2372
2373                 // There should be no monitor updates, as we are still awaiting the earlier, still-in-progress one.
2374                 check_added_monitors!(nodes[0], 0);
2375                 check_added_monitors!(nodes[1], 0);
2376         }
2377
2378         // If we finish updating the monitor, we should free the holding cell right away (this did
2379         // not occur prior to #756).
2380         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2381         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2382         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2383         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2384
2385         // New outbound messages should be generated immediately upon a call to
2386         // get_and_clear_pending_msg_events (but not before).
2387         check_added_monitors!(nodes[0], 0);
2388         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2389         check_added_monitors!(nodes[0], 1);
2390         assert_eq!(events.len(), 1);
2391
2392         // Deliver the pending in-flight CS
2393         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2394         check_added_monitors!(nodes[0], 1);
2395
2396         let commitment_msg = match events.pop().unwrap() {
2397                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2398                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2399                         assert!(updates.update_fail_htlcs.is_empty());
2400                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2401                         assert!(updates.update_fee.is_none());
2402                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2403                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2404                         expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
2405                         assert_eq!(updates.update_add_htlcs.len(), 1);
2406                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2407                         updates.commitment_signed
2408                 },
2409                 _ => panic!("Unexpected event type!"),
2410         };
2411
2412         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2413         check_added_monitors!(nodes[1], 1);
2414
2415         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2416         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2417         expect_pending_htlcs_forwardable!(nodes[1]);
2418         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2419         check_added_monitors!(nodes[1], 1);
2420
2421         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
2422
2423         let events = nodes[1].node.get_and_clear_pending_events();
2424         assert_eq!(events.len(), 2);
2425         match events[0] {
2426                 Event::PendingHTLCsForwardable { .. } => { },
2427                 _ => panic!("Unexpected event"),
2428         };
2429         match events[1] {
2430                 Event::PaymentPathSuccessful { .. } => { },
2431                 _ => panic!("Unexpected event"),
2432         };
2433
2434         nodes[1].node.process_pending_htlc_forwards();
2435         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2436
2437         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2438         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2439 }
2440 #[test]
2441 fn channel_holding_cell_serialize() {
2442         do_channel_holding_cell_serialize(true, true);
2443         do_channel_holding_cell_serialize(true, false);
2444         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2445 }
2446
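// Where nodes[1]'s update_fulfill_htlc to nodes[0] stands when the nodes[1] <-> nodes[2]
// disconnect happens: delivered but not yet through a commitment dance (Received), stuck in the
// holding cell and not yet sent (HoldingCell), or fully committed (Cleared).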
2447 #[derive(PartialEq)]
2448 enum HTLCStatusAtDupClaim {
2449         Received,
2450         HoldingCell,
2451         Cleared,
2452 }
2453 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2454         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2455         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2456         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2457         // reconnects, and then has to re-send its update_fulfill_htlc message.
2458         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2459         // channel on which the inbound HTLC was received.
2460         let chanmon_cfgs = create_chanmon_cfgs(3);
2461         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2462         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2463         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2464
2465         create_announced_chan_between_nodes(&nodes, 0, 1);
2466         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2467
2468         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2469
2470         let mut as_raa = None;
2471         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2472                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2473                 // awaiting a remote revoke_and_ack from nodes[0].
2474                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2475                 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2476                         RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2477                 check_added_monitors!(nodes[0], 1);
2478
2479                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2480                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2481                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2482                 check_added_monitors!(nodes[1], 1);
2483
2484                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2485                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2486                 check_added_monitors!(nodes[0], 1);
2487                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2488                 check_added_monitors!(nodes[0], 1);
2489
2490                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2491         }
2492
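	// Hand-build the update_fulfill_htlc nodes[2] would send so that we can deliver it to
	// nodes[1] in both cases -- even when nodes[2] instead failed the HTLC -- simulating a
	// duplicate claim delivered around a reconnect.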
2493         let fulfill_msg = msgs::UpdateFulfillHTLC {
2494                 channel_id: chan_id_2,
2495                 htlc_id: 0,
2496                 payment_preimage,
2497         };
2498         if second_fails {
2499                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2500                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2501                 check_added_monitors!(nodes[2], 1);
2502                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2503         } else {
2504                 nodes[2].node.claim_funds(payment_preimage);
2505                 check_added_monitors!(nodes[2], 1);
2506                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2507
2508                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2509                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2510                 // Check that the message we're about to deliver matches the one generated:
2511                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2512         }
2513         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2514         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2515         check_added_monitors!(nodes[1], 1);
2516
2517         let mut bs_updates = None;
2518         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2519                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2520                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2521                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2522                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2523                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2524                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2525                         expect_payment_path_successful!(nodes[0]);
2526                 }
2527         } else {
2528                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2529         }
2530
2531         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2532         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2533
2534         if second_fails {
2535                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
2536                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2537         } else {
2538                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
2539         }
2540
2541         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2542                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2543                 check_added_monitors!(nodes[1], 1);
2544                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2545
2546                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2547                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2548                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2549                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2550         }
2551         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2552                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2553                 expect_payment_path_successful!(nodes[0]);
2554         }
2555 }
2556
2557 #[test]
2558 fn test_reconnect_dup_htlc_claims() {
2559         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2560         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2561         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2562         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2563         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2564         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2565 }
2566
2567 #[test]
2568 fn test_temporary_error_during_shutdown() {
2569         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2570         // close.
2571         let mut config = test_default_channel_config();
2572         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2573
2574         let chanmon_cfgs = create_chanmon_cfgs(2);
2575         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2576         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2577         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2578
2579         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2580
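	// Make both nodes' shutdown-script monitor updates return InProgress; neither side should
	// progress towards closing_signed until its own update is completed below.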
2581         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2582         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2583
2584         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2585         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2586         check_added_monitors!(nodes[1], 1);
2587
2588         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2589         check_added_monitors!(nodes[0], 1);
2590
2591         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2592
2593         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2594         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2595
2596         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2597         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2598         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2599
2600         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2601
2602         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2603         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2604         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2605
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
        let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
        let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        assert!(none_b.is_none());
        let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

        assert_eq!(txn_a, txn_b);
        assert_eq!(txn_a.len(), 1);
        check_spends!(txn_a[0], funding_tx);
        check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
        check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
}

#[test]
fn test_permanent_error_during_sending_shutdown() {
        // Test that permanent failures when updating the monitor's shutdown script result in a force
        // close when initiating a cooperative close.
        let mut config = test_default_channel_config();
        config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());

        // We always send the `shutdown` message when initiating a shutdown, even if we immediately
        // close the channel thereafter.
        let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
        if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
        if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
        if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

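        // Two monitor updates should have been added: the shutdown-script update which permanently
        // failed, plus the update generated when the channel was force-closed as a result.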
        check_added_monitors!(nodes[0], 2);
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn test_permanent_error_during_handling_shutdown() {
        // Test that permanent failures when updating the monitor's shutdown script result in a force
        // close when handling a cooperative close.
        let mut config = test_default_channel_config();
        config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

        assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
        let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);

        // We always send the `shutdown` response when receiving a shutdown, even if we immediately
        // close the channel thereafter.
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(msg_events.len(), 3);
        if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
        if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
        if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

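        // As above, two monitor updates: the failed shutdown-script update and the force-close
        // update that follows it.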
        check_added_monitors!(nodes[1], 2);
        check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn double_temp_error() {
        // Test that it's OK to have multiple `ChainMonitor::update_channel` calls return
        // `InProgress` in a row.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);

        let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
        let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // `claim_funds` results in a ChannelMonitorUpdate.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
        // which had some asserts that prevented it from being called twice.
        nodes[1].node.claim_funds(payment_preimage_2);
        check_added_monitors!(nodes[1], 1);
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

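        // Complete the two in-flight monitor updates in order. Completing only the first releases
        // no messages; everything flows once the second one finishes.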
        let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);

        // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
        // and get both PaymentClaimed events at once.
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();

        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
                Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
                _ => panic!("Unexpected Event: {:?}", events[0]),
        }
        match events[1] {
                Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
                _ => panic!("Unexpected Event: {:?}", events[1]),
        }

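        // Only the first claim's update_fulfill_htlc/commitment_signed pair is released here; the
        // second fulfill sits in the holding cell until nodes[0] revokes its current commitment.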
        assert_eq!(msg_events.len(), 1);
        let (update_fulfill_1, commitment_signed_b1, node_id) = {
                match &msg_events[0] {
                        &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                                assert!(update_add_htlcs.is_empty());
                                assert_eq!(update_fulfill_htlcs.len(), 1);
                                assert!(update_fail_htlcs.is_empty());
                                assert!(update_fail_malformed_htlcs.is_empty());
                                assert!(update_fee.is_none());
                                (update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
                        },
                        _ => panic!("Unexpected event"),
                }
        };
        assert_eq!(node_id, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
        check_added_monitors!(nodes[0], 0);
        expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
        check_added_monitors!(nodes[0], 1);
        nodes[0].node.process_pending_htlc_forwards();
        let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        check_added_monitors!(nodes[1], 0);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
        check_added_monitors!(nodes[1], 1);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
        check_added_monitors!(nodes[1], 1);

        // Complete the second HTLC.
        let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
                let events = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 2);
                (match &events[0] {
                        MessageSendEvent::UpdateHTLCs { node_id, updates } => {
                                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                                assert!(updates.update_add_htlcs.is_empty());
                                assert!(updates.update_fail_htlcs.is_empty());
                                assert!(updates.update_fail_malformed_htlcs.is_empty());
                                assert!(updates.update_fee.is_none());
                                assert_eq!(updates.update_fulfill_htlcs.len(), 1);
                                (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
                        },
                        _ => panic!("Unexpected event"),
                },
                match events[1] {
                        MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                                (*msg).clone()
                        },
                        _ => panic!("Unexpected event"),
                })
        };
        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
        check_added_monitors!(nodes[0], 1);
        expect_payment_path_successful!(nodes[0]);

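        // Deliver the second fulfill and settle the remaining commitment state with a full
        // commitment_signed dance, completing the second payment.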
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
        check_added_monitors!(nodes[0], 0);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
        expect_payment_sent!(nodes[0], payment_preimage_2);
}

fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
        // Test that if the monitor update generated in funding_signed is stored async and we restart
        // with the latest ChannelManager, but the ChannelMonitor persistence never completed, we
        // happily drop the channel and move on.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

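        // Declared before `nodes` so they outlive it; `reload_node!` populates these when nodes[0]
        // is restarted below.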
        let persister: test_utils::TestPersister;
        let new_chain_monitor: test_utils::TestChainMonitor;
        let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

        let mut chan_config = test_default_channel_config();
        chan_config.manually_accept_inbound_channels = true;
        chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
                        if use_0conf {
                                nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
                        } else {
                                nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
                        }
                },
                _ => panic!("Unexpected event"),
        };

        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

        let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);

        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);
        expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

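        // In the 0conf case nodes[1] sends channel_ready immediately alongside funding_signed,
        // hence the extra message event.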
        let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
        match &bs_signed_locked[0] {
                MessageSendEvent::SendFundingSigned { msg, .. } => {
                        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

                        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
                        check_added_monitors!(nodes[0], 1);
                }
                _ => panic!("Unexpected event"),
        }
        if use_0conf {
                match &bs_signed_locked[1] {
                        MessageSendEvent::SendChannelReady { msg, .. } => {
                                nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
                        }
                        _ => panic!("Unexpected event"),
                }
        }

        assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

        // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
        // broadcast the funding transaction. If nodes[0] restarts at this point with the
        // ChannelMonitor lost, we should simply discard the channel.

        // The test framework checks that watched_txn/outputs match the monitor set, which they will
        // not, so we have to clear them here.
        nodes[0].chain_source.watched_txn.lock().unwrap().clear();
        nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

        reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
        check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
        assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_outbound_reload_without_init_mon() {
        do_test_outbound_reload_without_init_mon(true);
        do_test_outbound_reload_without_init_mon(false);
}

fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
        // Test that if the monitor update generated when handling funding_created is stored async
        // and we restart with the latest ChannelManager, but the ChannelMonitor persistence never
        // completed, we happily drop the channel and move on.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

        let persister: test_utils::TestPersister;
        let new_chain_monitor: test_utils::TestChainMonitor;
        let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

        let mut chan_config = test_default_channel_config();
        chan_config.manually_accept_inbound_channels = true;
        chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
        nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::OpenChannelRequest { temporary_channel_id, .. } => {
                        if use_0conf {
                                nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
                        } else {
                                nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
                        }
                },
                _ => panic!("Unexpected event"),
        };

        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

        let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);

        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
        check_added_monitors!(nodes[1], 1);

        // nodes[1] happily sends its funding_signed even though it's awaiting the persistence of
        // the initial ChannelMonitor, but it will decline to send its channel_ready even if the
        // funding transaction is confirmed.
        let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

        nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
        check_added_monitors!(nodes[0], 1);
        expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

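        // Whether or not the funding transaction confirms, and even after handling nodes[0]'s
        // channel_ready, nodes[1] stays silent: its initial ChannelMonitor write is still pending,
        // so it withholds its own channel_ready.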
        let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
        if lock_commitment {
                confirm_transaction(&nodes[0], &as_funding_tx[0]);
                confirm_transaction(&nodes[1], &as_funding_tx[0]);
        }
        if use_0conf || lock_commitment {
                let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
                nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
        }
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

        // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
        // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
        // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.

        // The test framework checks that watched_txn/outputs match the monitor set, which they will
        // not, so we have to clear them here.
        nodes[1].chain_source.watched_txn.lock().unwrap().clear();
        nodes[1].chain_source.watched_outputs.lock().unwrap().clear();

        reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

        check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
        assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_inbound_reload_without_init_mon() {
        do_test_inbound_reload_without_init_mon(true, true);
        do_test_inbound_reload_without_init_mon(true, false);
        do_test_inbound_reload_without_init_mon(false, true);
        do_test_inbound_reload_without_init_mon(false, false);
}