Move `Channel` fields into `ChannelContext` struct
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
11 //! monitor updates.
12 //! There are a bunch of these as their handling is relatively error-prone so they are split out
13 //! here. See also the chanmon_fail_consistency fuzz test.
14
15 use bitcoin::blockdata::constants::genesis_block;
16 use bitcoin::hash_types::BlockHash;
17 use bitcoin::network::constants::Network;
18 use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
19 use crate::chain::transaction::OutPoint;
20 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
21 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
22 use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
23 use crate::ln::channel::AnnouncementSigsState;
24 use crate::ln::msgs;
25 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
26 use crate::util::enforcing_trait_impls::EnforcingSigner;
27 use crate::util::errors::APIError;
28 use crate::util::ser::{ReadableArgs, Writeable};
29 use crate::util::test_utils::TestBroadcaster;
30
31 use crate::ln::functional_test_utils::*;
32
33 use crate::util::test_utils;
34
35 use crate::io;
36 use bitcoin::hashes::Hash;
37 use crate::prelude::*;
38 use crate::sync::{Arc, Mutex};
39
40 #[test]
41 fn test_simple_monitor_permanent_update_fail() {
42         // Test that we handle a simple permanent monitor update failure
43         let chanmon_cfgs = create_chanmon_cfgs(2);
44         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
45         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
46         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
47         create_announced_chan_between_nodes(&nodes, 0, 1);
48
49         let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
50         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
51         unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
52                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
53                 ), true, APIError::ChannelUnavailable {..}, {});
54         check_added_monitors!(nodes[0], 2);
55
56         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
57         assert_eq!(events_1.len(), 2);
58         match events_1[0] {
59                 MessageSendEvent::BroadcastChannelUpdate { .. } => {},
60                 _ => panic!("Unexpected event"),
61         };
62         match events_1[1] {
63                 MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
64                 _ => panic!("Unexpected event"),
65         };
66
67         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
68
69         // TODO: Once we hit the chain with the failure transaction we should check that we get a
70         // PaymentPathFailed event
71
72         assert_eq!(nodes[0].node.list_channels().len(), 0);
73         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
74 }
75
76 #[test]
77 fn test_monitor_and_persister_update_fail() {
78         // Test that if both updating the `ChannelMonitor` and persisting the updated
79         // `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
80         // one that gets returned.
81         let chanmon_cfgs = create_chanmon_cfgs(2);
82         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
83         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
84         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
85
86         // Create some initial channel
87         let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
88         let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };
89
90         // Rebalance the network to generate htlc in the two directions
91         send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
92
93         // Route an HTLC from node 0 to node 1 (but don't settle)
94         let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
95
96         // Make a copy of the ChainMonitor so we can capture the error it returns on a
97         // bogus update. Note that if instead we updated the nodes[0]'s ChainMonitor
98         // directly, the node would fail to be `Drop`'d at the end because its
99         // ChannelManager and ChainMonitor would be out of sync.
100         let chain_source = test_utils::TestChainSource::new(Network::Testnet);
101         let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
102         let persister = test_utils::TestPersister::new();
103         let tx_broadcaster = TestBroadcaster {
104                 txn_broadcasted: Mutex::new(Vec::new()),
105                 // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
106                 // that we are at height 200 so that it doesn't think we're violating the time lock
107                 // requirements of transactions broadcasted at that point.
108                 blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
109         };
110         let chain_mon = {
111                 let new_monitor = {
112                         let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
113                         let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
114                                 &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
115                         assert!(new_monitor == *monitor);
116                         new_monitor
117                 };
118                 let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
119                 assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
120                 chain_mon
121         };
122         chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);
123
124         // Set the persister's return value to be a InProgress.
125         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
126
127         // Try to update ChannelMonitor
128         nodes[1].node.claim_funds(preimage);
129         expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
130         check_added_monitors!(nodes[1], 1);
131
132         let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
133         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
134         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
135         {
136                 let mut node_0_per_peer_lock;
137                 let mut node_0_peer_state_lock;
138                 let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
139                 if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
140                         // Check that even though the persister is returning a InProgress,
141                         // because the update is bogus, ultimately the error that's returned
142                         // should be a PermanentFailure.
143                         if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
144                         logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
145                         assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
146                 } else { assert!(false); }
147         }
148
149         check_added_monitors!(nodes[0], 1);
150         let events = nodes[0].node.get_and_clear_pending_events();
151         assert_eq!(events.len(), 1);
152 }
153
154 fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
155         // Test that we can recover from a simple temporary monitor update failure optionally with
156         // a disconnect in between
157         let chanmon_cfgs = create_chanmon_cfgs(2);
158         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
159         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
160         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
161         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
162
163         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
164
165         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
166
167         {
168                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
169                                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
170                         ), false, APIError::MonitorUpdateInProgress, {});
171                 check_added_monitors!(nodes[0], 1);
172         }
173
174         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
175         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
176         assert_eq!(nodes[0].node.list_channels().len(), 1);
177
178         if disconnect {
179                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
180                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
181                 reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
182         }
183
184         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
185         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
186         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
187         check_added_monitors!(nodes[0], 0);
188
189         let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
190         assert_eq!(events_2.len(), 1);
191         let payment_event = SendEvent::from_event(events_2.pop().unwrap());
192         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
193         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
194         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
195
196         expect_pending_htlcs_forwardable!(nodes[1]);
197
198         let events_3 = nodes[1].node.get_and_clear_pending_events();
199         assert_eq!(events_3.len(), 1);
200         match events_3[0] {
201                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
202                         assert_eq!(payment_hash_1, *payment_hash);
203                         assert_eq!(amount_msat, 1_000_000);
204                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
205                         assert_eq!(via_channel_id, Some(channel_id));
206                         match &purpose {
207                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
208                                         assert!(payment_preimage.is_none());
209                                         assert_eq!(payment_secret_1, *payment_secret);
210                                 },
211                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
212                         }
213                 },
214                 _ => panic!("Unexpected event"),
215         }
216
217         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
218
219         // Now set it to failed again...
220         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
221         {
222                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
223                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
224                                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
225                         ), false, APIError::MonitorUpdateInProgress, {});
226                 check_added_monitors!(nodes[0], 1);
227         }
228
229         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
230         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
231         assert_eq!(nodes[0].node.list_channels().len(), 1);
232
233         if disconnect {
234                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
235                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
236                 reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
237         }
238
239         // ...and make sure we can force-close a frozen channel
240         nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
241         check_added_monitors!(nodes[0], 1);
242         check_closed_broadcast!(nodes[0], true);
243
244         // TODO: Once we hit the chain with the failure transaction we should check that we get a
245         // PaymentPathFailed event
246
247         assert_eq!(nodes[0].node.list_channels().len(), 0);
248         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
249 }
250
251 #[test]
252 fn test_simple_monitor_temporary_update_fail() {
253         do_test_simple_monitor_temporary_update_fail(false);
254         do_test_simple_monitor_temporary_update_fail(true);
255 }
256
257 fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
258         let disconnect_flags = 8 | 16;
259
260         // Test that we can recover from a temporary monitor update failure with some in-flight
261         // HTLCs going on at the same time potentially with some disconnection thrown in.
262         // * First we route a payment, then get a temporary monitor update failure when trying to
263         //   route a second payment. We then claim the first payment.
264         // * If disconnect_count is set, we will disconnect at this point (which is likely as
265         //   InProgress likely indicates net disconnect which resulted in failing to update the
266         //   ChannelMonitor on a watchtower).
267         // * If !(disconnect_count & 16) we deliver a update_fulfill_htlc/CS for the first payment
268         //   immediately, otherwise we wait disconnect and deliver them via the reconnect
269         //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
270         //   disconnect_count & !disconnect_flags is 0).
271         // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
272         //   through message sending, potentially disconnect/reconnecting multiple times based on
273         //   disconnect_count, to get the update_fulfill_htlc through.
274         // * We then walk through more message exchanges to get the original update_add_htlc
275         //   through, swapping message ordering based on disconnect_count & 8 and optionally
276         //   disconnect/reconnecting based on disconnect_count.
277         let chanmon_cfgs = create_chanmon_cfgs(2);
278         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
279         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
280         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
281         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
282
283         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
284
285         // Now try to send a second payment which will fail to send
286         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
287         {
288                 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
289                 unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
290                                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
291                         ), false, APIError::MonitorUpdateInProgress, {});
292                 check_added_monitors!(nodes[0], 1);
293         }
294
295         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
296         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
297         assert_eq!(nodes[0].node.list_channels().len(), 1);
298
299         // Claim the previous payment, which will result in a update_fulfill_htlc/CS from nodes[1]
300         // but nodes[0] won't respond since it is frozen.
301         nodes[1].node.claim_funds(payment_preimage_1);
302         check_added_monitors!(nodes[1], 1);
303         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
304
305         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
306         assert_eq!(events_2.len(), 1);
307         let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
308                 MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
309                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
310                         assert!(update_add_htlcs.is_empty());
311                         assert_eq!(update_fulfill_htlcs.len(), 1);
312                         assert!(update_fail_htlcs.is_empty());
313                         assert!(update_fail_malformed_htlcs.is_empty());
314                         assert!(update_fee.is_none());
315
316                         if (disconnect_count & 16) == 0 {
317                                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
318                                 let events_3 = nodes[0].node.get_and_clear_pending_events();
319                                 assert_eq!(events_3.len(), 1);
320                                 match events_3[0] {
321                                         Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
322                                                 assert_eq!(*payment_preimage, payment_preimage_1);
323                                                 assert_eq!(*payment_hash, payment_hash_1);
324                                         },
325                                         _ => panic!("Unexpected event"),
326                                 }
327
328                                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
329                                 check_added_monitors!(nodes[0], 1);
330                                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
331                         }
332
333                         (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
334                 },
335                 _ => panic!("Unexpected event"),
336         };
337
338         if disconnect_count & !disconnect_flags > 0 {
339                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
340                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
341         }
342
343         // Now fix monitor updating...
344         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
345         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
346         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
347         check_added_monitors!(nodes[0], 0);
348
349         macro_rules! disconnect_reconnect_peers { () => { {
350                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
351                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
352
353                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
354                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
355                 }, true).unwrap();
356                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
357                 assert_eq!(reestablish_1.len(), 1);
358                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
359                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
360                 }, false).unwrap();
361                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
362                 assert_eq!(reestablish_2.len(), 1);
363
364                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
365                 let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
366                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
367                 let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
368
369                 assert!(as_resp.0.is_none());
370                 assert!(bs_resp.0.is_none());
371
372                 (reestablish_1, reestablish_2, as_resp, bs_resp)
373         } } }
374
375         let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
376                 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
377                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
378
379                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
380                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
381                 }, true).unwrap();
382                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
383                 assert_eq!(reestablish_1.len(), 1);
384                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
385                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
386                 }, false).unwrap();
387                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
388                 assert_eq!(reestablish_2.len(), 1);
389
390                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
391                 check_added_monitors!(nodes[0], 0);
392                 let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
393                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
394                 check_added_monitors!(nodes[1], 0);
395                 let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
396
397                 assert!(as_resp.0.is_none());
398                 assert!(bs_resp.0.is_none());
399
400                 assert!(bs_resp.1.is_none());
401                 if (disconnect_count & 16) == 0 {
402                         assert!(bs_resp.2.is_none());
403
404                         assert!(as_resp.1.is_some());
405                         assert!(as_resp.2.is_some());
406                         assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
407                 } else {
408                         assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
409                         assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
410                         assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
411                         assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
412                         assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
413                         assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
414
415                         assert!(as_resp.1.is_none());
416
417                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
418                         let events_3 = nodes[0].node.get_and_clear_pending_events();
419                         assert_eq!(events_3.len(), 1);
420                         match events_3[0] {
421                                 Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
422                                         assert_eq!(*payment_preimage, payment_preimage_1);
423                                         assert_eq!(*payment_hash, payment_hash_1);
424                                 },
425                                 _ => panic!("Unexpected event"),
426                         }
427
428                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
429                         let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
430                         // No commitment_signed so get_event_msg's assert(len == 1) passes
431                         check_added_monitors!(nodes[0], 1);
432
433                         as_resp.1 = Some(as_resp_raa);
434                         bs_resp.2 = None;
435                 }
436
437                 if disconnect_count & !disconnect_flags > 1 {
438                         let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
439
440                         if (disconnect_count & 16) == 0 {
441                                 assert!(reestablish_1 == second_reestablish_1);
442                                 assert!(reestablish_2 == second_reestablish_2);
443                         }
444                         assert!(as_resp == second_as_resp);
445                         assert!(bs_resp == second_bs_resp);
446                 }
447
448                 (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
449         } else {
450                 let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
451                 assert_eq!(events_4.len(), 2);
452                 (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
453                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
454                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
455                                 msg.clone()
456                         },
457                         _ => panic!("Unexpected event"),
458                 })
459         };
460
461         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
462
463         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
464         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
465         let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
466         // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
467         check_added_monitors!(nodes[1], 1);
468
469         if disconnect_count & !disconnect_flags > 2 {
470                 let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
471
472                 assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
473                 assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
474
475                 assert!(as_resp.2.is_none());
476                 assert!(bs_resp.2.is_none());
477         }
478
479         let as_commitment_update;
480         let bs_second_commitment_update;
481
482         macro_rules! handle_bs_raa { () => {
483                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
484                 as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
485                 assert!(as_commitment_update.update_add_htlcs.is_empty());
486                 assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
487                 assert!(as_commitment_update.update_fail_htlcs.is_empty());
488                 assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
489                 assert!(as_commitment_update.update_fee.is_none());
490                 check_added_monitors!(nodes[0], 1);
491         } }
492
493         macro_rules! handle_initial_raa { () => {
494                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
495                 bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
496                 assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
497                 assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
498                 assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
499                 assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
500                 assert!(bs_second_commitment_update.update_fee.is_none());
501                 check_added_monitors!(nodes[1], 1);
502         } }
503
504         if (disconnect_count & 8) == 0 {
505                 handle_bs_raa!();
506
507                 if disconnect_count & !disconnect_flags > 3 {
508                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
509
510                         assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
511                         assert!(bs_resp.1.is_none());
512
513                         assert!(as_resp.2.unwrap() == as_commitment_update);
514                         assert!(bs_resp.2.is_none());
515
516                         assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
517                 }
518
519                 handle_initial_raa!();
520
521                 if disconnect_count & !disconnect_flags > 4 {
522                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
523
524                         assert!(as_resp.1.is_none());
525                         assert!(bs_resp.1.is_none());
526
527                         assert!(as_resp.2.unwrap() == as_commitment_update);
528                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
529                 }
530         } else {
531                 handle_initial_raa!();
532
533                 if disconnect_count & !disconnect_flags > 3 {
534                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
535
536                         assert!(as_resp.1.is_none());
537                         assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
538
539                         assert!(as_resp.2.is_none());
540                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
541
542                         assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
543                 }
544
545                 handle_bs_raa!();
546
547                 if disconnect_count & !disconnect_flags > 4 {
548                         let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
549
550                         assert!(as_resp.1.is_none());
551                         assert!(bs_resp.1.is_none());
552
553                         assert!(as_resp.2.unwrap() == as_commitment_update);
554                         assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
555                 }
556         }
557
558         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
559         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
560         // No commitment_signed so get_event_msg's assert(len == 1) passes
561         check_added_monitors!(nodes[0], 1);
562
563         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
564         let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
565         // No commitment_signed so get_event_msg's assert(len == 1) passes
566         check_added_monitors!(nodes[1], 1);
567
568         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
569         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
570         check_added_monitors!(nodes[1], 1);
571
572         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
573         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
574         check_added_monitors!(nodes[0], 1);
575         expect_payment_path_successful!(nodes[0]);
576
577         expect_pending_htlcs_forwardable!(nodes[1]);
578
579         let events_5 = nodes[1].node.get_and_clear_pending_events();
580         assert_eq!(events_5.len(), 1);
581         match events_5[0] {
582                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
583                         assert_eq!(payment_hash_2, *payment_hash);
584                         assert_eq!(amount_msat, 1_000_000);
585                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
586                         assert_eq!(via_channel_id, Some(channel_id));
587                         match &purpose {
588                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
589                                         assert!(payment_preimage.is_none());
590                                         assert_eq!(payment_secret_2, *payment_secret);
591                                 },
592                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
593                         }
594                 },
595                 _ => panic!("Unexpected event"),
596         }
597
598         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
599 }
600
601 #[test]
602 fn test_monitor_temporary_update_fail_a() {
603         do_test_monitor_temporary_update_fail(0);
604         do_test_monitor_temporary_update_fail(1);
605         do_test_monitor_temporary_update_fail(2);
606         do_test_monitor_temporary_update_fail(3);
607         do_test_monitor_temporary_update_fail(4);
608         do_test_monitor_temporary_update_fail(5);
609 }
610
611 #[test]
612 fn test_monitor_temporary_update_fail_b() {
613         do_test_monitor_temporary_update_fail(2 | 8);
614         do_test_monitor_temporary_update_fail(3 | 8);
615         do_test_monitor_temporary_update_fail(4 | 8);
616         do_test_monitor_temporary_update_fail(5 | 8);
617 }
618
619 #[test]
620 fn test_monitor_temporary_update_fail_c() {
621         do_test_monitor_temporary_update_fail(1 | 16);
622         do_test_monitor_temporary_update_fail(2 | 16);
623         do_test_monitor_temporary_update_fail(3 | 16);
624         do_test_monitor_temporary_update_fail(2 | 8 | 16);
625         do_test_monitor_temporary_update_fail(3 | 8 | 16);
626 }
627
628 #[test]
629 fn test_monitor_update_fail_cs() {
630         // Tests handling of a monitor update failure when processing an incoming commitment_signed
631         let chanmon_cfgs = create_chanmon_cfgs(2);
632         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
633         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
634         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
635         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
636
637         let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
638         {
639                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
640                         RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
641                 check_added_monitors!(nodes[0], 1);
642         }
643
644         let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
645         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
646
647         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
648         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
649         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
650         check_added_monitors!(nodes[1], 1);
651         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
652
653         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
654         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
655         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
656         check_added_monitors!(nodes[1], 0);
657         let responses = nodes[1].node.get_and_clear_pending_msg_events();
658         assert_eq!(responses.len(), 2);
659
660         match responses[0] {
661                 MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
662                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
663                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
664                         check_added_monitors!(nodes[0], 1);
665                 },
666                 _ => panic!("Unexpected event"),
667         }
668         match responses[1] {
669                 MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
670                         assert!(updates.update_add_htlcs.is_empty());
671                         assert!(updates.update_fulfill_htlcs.is_empty());
672                         assert!(updates.update_fail_htlcs.is_empty());
673                         assert!(updates.update_fail_malformed_htlcs.is_empty());
674                         assert!(updates.update_fee.is_none());
675                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
676
677                         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
678                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
679                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
680                         check_added_monitors!(nodes[0], 1);
681                         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
682                 },
683                 _ => panic!("Unexpected event"),
684         }
685
686         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
687         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
688         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
689         check_added_monitors!(nodes[0], 0);
690
691         let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
692         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
693         check_added_monitors!(nodes[1], 1);
694
695         expect_pending_htlcs_forwardable!(nodes[1]);
696
697         let events = nodes[1].node.get_and_clear_pending_events();
698         assert_eq!(events.len(), 1);
699         match events[0] {
700                 Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
701                         assert_eq!(payment_hash, our_payment_hash);
702                         assert_eq!(amount_msat, 1_000_000);
703                         assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
704                         assert_eq!(via_channel_id, Some(channel_id));
705                         match &purpose {
706                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
707                                         assert!(payment_preimage.is_none());
708                                         assert_eq!(our_payment_secret, *payment_secret);
709                                 },
710                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
711                         }
712                 },
713                 _ => panic!("Unexpected event"),
714         };
715
716         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
717 }
718
719 #[test]
720 fn test_monitor_update_fail_no_rebroadcast() {
721         // Tests handling of a monitor update failure when no message rebroadcasting on
722         // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
723         // fuzz tests.
724         let chanmon_cfgs = create_chanmon_cfgs(2);
725         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
726         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
727         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
728         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
729
730         let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
731         {
732                 nodes[0].node.send_payment_with_route(&route, our_payment_hash,
733                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
734                 check_added_monitors!(nodes[0], 1);
735         }
736
737         let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
738         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
739         let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
740
741         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
742         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
743         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
744         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
745         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
746         check_added_monitors!(nodes[1], 1);
747
748         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
749         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
750         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
751         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
752         check_added_monitors!(nodes[1], 0);
753         expect_pending_htlcs_forwardable!(nodes[1]);
754
755         let events = nodes[1].node.get_and_clear_pending_events();
756         assert_eq!(events.len(), 1);
757         match events[0] {
758                 Event::PaymentClaimable { payment_hash, .. } => {
759                         assert_eq!(payment_hash, our_payment_hash);
760                 },
761                 _ => panic!("Unexpected event"),
762         }
763
764         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
765 }
766
767 #[test]
768 fn test_monitor_update_raa_while_paused() {
769         // Tests handling of an RAA while monitor updating has already been marked failed.
770         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
771         let chanmon_cfgs = create_chanmon_cfgs(2);
772         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
773         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
774         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
775         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
776
777         send_payment(&nodes[0], &[&nodes[1]], 5000000);
778         let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
779         {
780                 nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
781                         RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
782                 check_added_monitors!(nodes[0], 1);
783         }
784         let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
785
786         let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
787         {
788                 nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
789                         RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
790                 check_added_monitors!(nodes[1], 1);
791         }
792         let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
793
794         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
795         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
796         check_added_monitors!(nodes[1], 1);
797         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
798
799         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
800         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
801         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
802         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
803         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
804         check_added_monitors!(nodes[0], 1);
805         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
806
807         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
808         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
809         check_added_monitors!(nodes[0], 1);
810
811         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
812         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
813         check_added_monitors!(nodes[0], 0);
814
815         let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
816         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
817         check_added_monitors!(nodes[1], 1);
818         let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
819
820         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
821         check_added_monitors!(nodes[1], 1);
822         let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
823
824         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
825         check_added_monitors!(nodes[0], 1);
826         let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
827
828         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
829         check_added_monitors!(nodes[0], 1);
830         expect_pending_htlcs_forwardable!(nodes[0]);
831         expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
832
833         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
834         check_added_monitors!(nodes[1], 1);
835         expect_pending_htlcs_forwardable!(nodes[1]);
836         expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
837
838         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
839         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
840 }
841
842 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
843         // Tests handling of a monitor update failure when processing an incoming RAA
844         let chanmon_cfgs = create_chanmon_cfgs(3);
845         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
846         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
847         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
848         create_announced_chan_between_nodes(&nodes, 0, 1);
849         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
850
851         // Rebalance a bit so that we can send backwards from 2 to 1.
852         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
853
854         // Route a first payment that we'll fail backwards
855         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
856
857         // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
858         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
859         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
860         check_added_monitors!(nodes[2], 1);
861
862         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
863         assert!(updates.update_add_htlcs.is_empty());
864         assert!(updates.update_fulfill_htlcs.is_empty());
865         assert_eq!(updates.update_fail_htlcs.len(), 1);
866         assert!(updates.update_fail_malformed_htlcs.is_empty());
867         assert!(updates.update_fee.is_none());
868         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
869
870         let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
871         check_added_monitors!(nodes[0], 0);
872
873         // While the second channel is AwaitingRAA, forward a second payment to get it into the
874         // holding cell.
875         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
876         {
877                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
878                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
879                 check_added_monitors!(nodes[0], 1);
880         }
881
882         let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
883         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
884         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
885
886         expect_pending_htlcs_forwardable!(nodes[1]);
887         check_added_monitors!(nodes[1], 0);
888         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
889
890         // Now fail monitor updating.
891         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
892         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
893         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
894         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
895         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
896         check_added_monitors!(nodes[1], 1);
897
898         // Forward a third payment which will also be added to the holding cell, despite the channel
899         // being paused waiting a monitor update.
900         let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
901         {
902                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
903                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
904                 check_added_monitors!(nodes[0], 1);
905         }
906
907         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
908         send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
909         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
910         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
911         check_added_monitors!(nodes[1], 0);
912
913         // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
914         // and not forwarded.
915         expect_pending_htlcs_forwardable!(nodes[1]);
916         check_added_monitors!(nodes[1], 0);
917         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
918
919         let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
920                 // Try to route another payment backwards from 2 to make sure 1 holds off on responding
921                 let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
922                 nodes[2].node.send_payment_with_route(&route, payment_hash_4,
923                         RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
924                 check_added_monitors!(nodes[2], 1);
925
926                 send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
927                 nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
928                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
929                 check_added_monitors!(nodes[1], 1);
930                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
931                 (Some(payment_preimage_4), Some(payment_hash_4))
932         } else { (None, None) };
933
934         // Restore monitor updating, ensuring we immediately get a fail-back update and a
935         // update_add update.
936         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
937         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
938         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
939         check_added_monitors!(nodes[1], 0);
940         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
941         check_added_monitors!(nodes[1], 1);
942
943         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
944         if test_ignore_second_cs {
945                 assert_eq!(events_3.len(), 3);
946         } else {
947                 assert_eq!(events_3.len(), 2);
948         }
949
950         // Note that the ordering of the events for different nodes is non-prescriptive, though the
951         // ordering of the two events that both go to nodes[2] have to stay in the same order.
952         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
953         let messages_a = match nodes_0_event {
954                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
955                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
956                         assert!(updates.update_fulfill_htlcs.is_empty());
957                         assert_eq!(updates.update_fail_htlcs.len(), 1);
958                         assert!(updates.update_fail_malformed_htlcs.is_empty());
959                         assert!(updates.update_add_htlcs.is_empty());
960                         assert!(updates.update_fee.is_none());
961                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
962                 },
963                 _ => panic!("Unexpected event type!"),
964         };
965
966         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
967         let send_event_b = SendEvent::from_event(nodes_2_event);
968         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
969
970         let raa = if test_ignore_second_cs {
971                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
972                 match nodes_2_event {
973                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
974                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
975                                 Some(msg.clone())
976                         },
977                         _ => panic!("Unexpected event"),
978                 }
979         } else { None };
980
981         // Now deliver the new messages...
982
983         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
984         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
985         expect_payment_failed!(nodes[0], payment_hash_1, true);
986
987         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
988         let as_cs;
989         if test_ignore_second_cs {
990                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
991                 check_added_monitors!(nodes[2], 1);
992                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
993                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
994                 check_added_monitors!(nodes[2], 1);
995                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
996                 assert!(bs_cs.update_add_htlcs.is_empty());
997                 assert!(bs_cs.update_fail_htlcs.is_empty());
998                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
999                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
1000                 assert!(bs_cs.update_fee.is_none());
1001
1002                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1003                 check_added_monitors!(nodes[1], 1);
1004                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1005
1006                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1007                 check_added_monitors!(nodes[1], 1);
1008         } else {
1009                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1010                 check_added_monitors!(nodes[2], 1);
1011
1012                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1013                 // As both messages are for nodes[1], they're in order.
1014                 assert_eq!(bs_revoke_and_commit.len(), 2);
1015                 match bs_revoke_and_commit[0] {
1016                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1017                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1018                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1019                                 check_added_monitors!(nodes[1], 1);
1020                         },
1021                         _ => panic!("Unexpected event"),
1022                 }
1023
1024                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1025
1026                 match bs_revoke_and_commit[1] {
1027                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1028                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1029                                 assert!(updates.update_add_htlcs.is_empty());
1030                                 assert!(updates.update_fail_htlcs.is_empty());
1031                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1032                                 assert!(updates.update_fulfill_htlcs.is_empty());
1033                                 assert!(updates.update_fee.is_none());
1034                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1035                                 check_added_monitors!(nodes[1], 1);
1036                         },
1037                         _ => panic!("Unexpected event"),
1038                 }
1039         }
1040
1041         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1042         assert!(as_cs.update_fail_htlcs.is_empty());
1043         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1044         assert!(as_cs.update_fulfill_htlcs.is_empty());
1045         assert!(as_cs.update_fee.is_none());
1046         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1047
1048
1049         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1050         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1051         check_added_monitors!(nodes[2], 1);
1052         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1053
1054         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1055         check_added_monitors!(nodes[2], 1);
1056         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1057
1058         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1059         check_added_monitors!(nodes[1], 1);
1060         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1061
1062         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1063         check_added_monitors!(nodes[1], 1);
1064         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1065
1066         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1067         check_added_monitors!(nodes[2], 1);
1068         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1069
1070         expect_pending_htlcs_forwardable!(nodes[2]);
1071
1072         let events_6 = nodes[2].node.get_and_clear_pending_events();
1073         assert_eq!(events_6.len(), 2);
1074         match events_6[0] {
1075                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1076                 _ => panic!("Unexpected event"),
1077         };
1078         match events_6[1] {
1079                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1080                 _ => panic!("Unexpected event"),
1081         };
1082
1083         if test_ignore_second_cs {
1084                 expect_pending_htlcs_forwardable!(nodes[1]);
1085                 check_added_monitors!(nodes[1], 1);
1086
1087                 send_event = SendEvent::from_node(&nodes[1]);
1088                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1089                 assert_eq!(send_event.msgs.len(), 1);
1090                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1091                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1092
1093                 expect_pending_htlcs_forwardable!(nodes[0]);
1094
1095                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1096                 assert_eq!(events_9.len(), 1);
1097                 match events_9[0] {
1098                         Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1099                         _ => panic!("Unexpected event"),
1100                 };
1101                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1102         }
1103
1104         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1105 }
1106
1107 #[test]
1108 fn test_monitor_update_fail_raa() {
1109         do_test_monitor_update_fail_raa(false);
1110         do_test_monitor_update_fail_raa(true);
1111 }
1112
1113 #[test]
1114 fn test_monitor_update_fail_reestablish() {
1115         // Simple test for message retransmission after monitor update failure on
1116         // channel_reestablish generating a monitor update (which comes from freeing holding cell
1117         // HTLCs).
1118         let chanmon_cfgs = create_chanmon_cfgs(3);
1119         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1120         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1121         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1122         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1123         create_announced_chan_between_nodes(&nodes, 1, 2);
1124
1125         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
1126
1127         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1128         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1129
1130         nodes[2].node.claim_funds(payment_preimage);
1131         check_added_monitors!(nodes[2], 1);
1132         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
1133
1134         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1135         assert!(updates.update_add_htlcs.is_empty());
1136         assert!(updates.update_fail_htlcs.is_empty());
1137         assert!(updates.update_fail_malformed_htlcs.is_empty());
1138         assert!(updates.update_fee.is_none());
1139         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1140         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1141         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
1142         check_added_monitors!(nodes[1], 1);
1143         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1144         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1145
1146         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1147         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1148                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1149         }, true).unwrap();
1150         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1151                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1152         }, false).unwrap();
1153
1154         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1155         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1156
1157         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1158
1159         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1160         assert_eq!(
1161                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1162                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1163
1164         nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
1165         check_added_monitors!(nodes[1], 1);
1166
1167         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1168         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1169
1170         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1171                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1172         }, true).unwrap();
1173         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1174                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1175         }, false).unwrap();
1176
1177         assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
1178         assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1179
1180         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1181         assert_eq!(
1182                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1183                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1184
1185         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1186         check_added_monitors!(nodes[1], 0);
1187         assert_eq!(
1188                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
1189                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1190
1191         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1192         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1193         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1194         check_added_monitors!(nodes[1], 0);
1195
1196         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1197         assert!(updates.update_add_htlcs.is_empty());
1198         assert!(updates.update_fail_htlcs.is_empty());
1199         assert!(updates.update_fail_malformed_htlcs.is_empty());
1200         assert!(updates.update_fee.is_none());
1201         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1202         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1203         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1204         expect_payment_sent!(nodes[0], payment_preimage);
1205 }
1206
1207 #[test]
1208 fn raa_no_response_awaiting_raa_state() {
1209         // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1210         // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1211         // in question (assuming it intends to respond with a CS after monitor updating is restored).
1212         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1213         let chanmon_cfgs = create_chanmon_cfgs(2);
1214         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1215         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1216         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1217         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1218
1219         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1220         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1221         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1222
1223         // Queue up two payments - one will be delivered right away, one immediately goes into the
1224         // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1225         // immediately after a CS. By setting failing the monitor update failure from the CS (which
1226         // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS
1227         // generation during RAA while in monitor-update-failed state.
1228         {
1229                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1230                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1231                 check_added_monitors!(nodes[0], 1);
1232                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1233                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1234                 check_added_monitors!(nodes[0], 0);
1235         }
1236
1237         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1238         assert_eq!(events.len(), 1);
1239         let payment_event = SendEvent::from_event(events.pop().unwrap());
1240         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1241         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1242         check_added_monitors!(nodes[1], 1);
1243
1244         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1245         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1246         check_added_monitors!(nodes[0], 1);
1247         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1248         assert_eq!(events.len(), 1);
1249         let payment_event = SendEvent::from_event(events.pop().unwrap());
1250
1251         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1252         check_added_monitors!(nodes[0], 1);
1253         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1254
1255         // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1256         // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1257         // then restore channel monitor updates.
1258         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1259         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1260         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1261         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1262         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1263         check_added_monitors!(nodes[1], 1);
1264         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1265
1266         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1267         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1268         check_added_monitors!(nodes[1], 1);
1269
1270         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1271         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1272         // nodes[1] should be AwaitingRAA here!
1273         check_added_monitors!(nodes[1], 0);
1274         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1275         expect_pending_htlcs_forwardable!(nodes[1]);
1276         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1277
1278         // We send a third payment here, which is somewhat of a redundant test, but the
1279         // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1280         // commitment transaction states) whereas here we can explicitly check for it.
1281         {
1282                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
1283                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1284                 check_added_monitors!(nodes[0], 0);
1285                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1286         }
1287         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1288         check_added_monitors!(nodes[0], 1);
1289         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1290         assert_eq!(events.len(), 1);
1291         let payment_event = SendEvent::from_event(events.pop().unwrap());
1292
1293         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1294         check_added_monitors!(nodes[0], 1);
1295         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1296
1297         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1298         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1299         check_added_monitors!(nodes[1], 1);
1300         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1301
1302         // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1303         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1304         check_added_monitors!(nodes[1], 1);
1305         expect_pending_htlcs_forwardable!(nodes[1]);
1306         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1307         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1308
1309         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1310         check_added_monitors!(nodes[0], 1);
1311
1312         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1313         check_added_monitors!(nodes[0], 1);
1314         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1315
1316         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1317         check_added_monitors!(nodes[1], 1);
1318         expect_pending_htlcs_forwardable!(nodes[1]);
1319         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1320
1321         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1322         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1323         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
1324 }
1325
1326 #[test]
1327 fn claim_while_disconnected_monitor_update_fail() {
1328         // Test for claiming a payment while disconnected and then having the resulting
1329         // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1330         // contrived case for nodes with network instability.
1331         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1332         // code introduced a regression in this test (specifically, this caught a removal of the
1333         // channel_reestablish handling ensuring the order was sensical given the messages used).
1334         let chanmon_cfgs = create_chanmon_cfgs(2);
1335         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1336         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1337         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1338         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1339
1340         // Forward a payment for B to claim
1341         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1342
1343         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1344         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1345
1346         nodes[1].node.claim_funds(payment_preimage_1);
1347         check_added_monitors!(nodes[1], 1);
1348         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1349
1350         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1351                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1352         }, true).unwrap();
1353         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1354                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1355         }, false).unwrap();
1356
1357         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1358         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1359
1360         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1361         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1362
1363         // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
1364         // update.
1365         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1366
1367         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1368         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1369         check_added_monitors!(nodes[1], 1);
1370         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1371
1372         // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1373         // the monitor still failed
1374         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1375         {
1376                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1377                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1378                 check_added_monitors!(nodes[0], 1);
1379         }
1380
1381         let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1382         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1383         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1384         check_added_monitors!(nodes[1], 1);
1385         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1386         // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
1387         // until we've channel_monitor_update'd and updated for the new commitment transaction.
1388
1389         // Now un-fail the monitor, which will result in B sending its original commitment update,
1390         // receiving the commitment update from A, and the resulting commitment dances.
1391         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1392         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1393         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1394         check_added_monitors!(nodes[1], 0);
1395
1396         let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1397         assert_eq!(bs_msgs.len(), 2);
1398
1399         match bs_msgs[0] {
1400                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1401                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1402                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1403                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1404                         check_added_monitors!(nodes[0], 1);
1405
1406                         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1407                         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1408                         check_added_monitors!(nodes[1], 1);
1409                 },
1410                 _ => panic!("Unexpected event"),
1411         }
1412
1413         match bs_msgs[1] {
1414                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1415                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1416                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1417                         check_added_monitors!(nodes[0], 1);
1418                 },
1419                 _ => panic!("Unexpected event"),
1420         }
1421
1422         let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1423
1424         let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1425         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1426         check_added_monitors!(nodes[0], 1);
1427         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1428
1429         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1430         check_added_monitors!(nodes[1], 1);
1431         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1432         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1433         check_added_monitors!(nodes[1], 1);
1434
1435         expect_pending_htlcs_forwardable!(nodes[1]);
1436         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1437
1438         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1439         check_added_monitors!(nodes[0], 1);
1440         expect_payment_sent!(nodes[0], payment_preimage_1);
1441
1442         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1443 }
1444
1445 #[test]
1446 fn monitor_failed_no_reestablish_response() {
1447         // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1448         // response to a commitment_signed.
1449         // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1450         // debug_assert!() failure in channel_reestablish handling.
1451         let chanmon_cfgs = create_chanmon_cfgs(2);
1452         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1453         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1454         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1455         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1456         {
1457                 let mut node_0_per_peer_lock;
1458                 let mut node_0_peer_state_lock;
1459                 get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1460         }
1461         {
1462                 let mut node_1_per_peer_lock;
1463                 let mut node_1_peer_state_lock;
1464                 get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1465         }
1466
1467         // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1468         // on receipt).
1469         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1470         {
1471                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1472                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1473                 check_added_monitors!(nodes[0], 1);
1474         }
1475
1476         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1477         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1478         assert_eq!(events.len(), 1);
1479         let payment_event = SendEvent::from_event(events.pop().unwrap());
1480         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1481         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1482         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1483         check_added_monitors!(nodes[1], 1);
1484
1485         // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1486         // is still failing to update monitors.
1487         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1488         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1489
1490         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1491                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1492         }, true).unwrap();
1493         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1494                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1495         }, false).unwrap();
1496
1497         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1498         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1499
1500         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1501         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1502         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1503         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1504
1505         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1506         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1507         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1508         check_added_monitors!(nodes[1], 0);
1509         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1510
1511         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1512         check_added_monitors!(nodes[0], 1);
1513         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1514         check_added_monitors!(nodes[0], 1);
1515
1516         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1517         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1518         check_added_monitors!(nodes[1], 1);
1519
1520         expect_pending_htlcs_forwardable!(nodes[1]);
1521         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1522
1523         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1524 }
1525
1526 #[test]
1527 fn first_message_on_recv_ordering() {
1528         // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1529         // messages, we're willing to flip the order of response messages if neccessary in resposne to
1530         // a commitment_signed which needs to send an RAA first.
1531         // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1532         // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1533         // response. To do this, we start routing two payments, with the final RAA for the first being
1534         // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1535         // have no pending response but will want to send a RAA/CS (with the updates for the second
1536         // payment applied).
1537         // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1538         let chanmon_cfgs = create_chanmon_cfgs(2);
1539         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1540         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1541         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1542         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1543
1544         // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1545         // can deliver it and fail the monitor update.
1546         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1547         {
1548                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1549                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1550                 check_added_monitors!(nodes[0], 1);
1551         }
1552
1553         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1554         assert_eq!(events.len(), 1);
1555         let payment_event = SendEvent::from_event(events.pop().unwrap());
1556         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1557         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1558         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1559         check_added_monitors!(nodes[1], 1);
1560         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1561
1562         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1563         check_added_monitors!(nodes[0], 1);
1564         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1565         check_added_monitors!(nodes[0], 1);
1566
1567         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1568
1569         // Route the second payment, generating an update_add_htlc/commitment_signed
1570         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1571         {
1572                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1573                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1574                 check_added_monitors!(nodes[0], 1);
1575         }
1576         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1577         assert_eq!(events.len(), 1);
1578         let payment_event = SendEvent::from_event(events.pop().unwrap());
1579         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1580
1581         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1582
1583         // Deliver the final RAA for the first payment, which does not require a response. RAAs
1584         // generally require a commitment_signed, so the fact that we're expecting an opposite response
1585         // to the next message also tests resetting the delivery order.
1586         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1587         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1588         check_added_monitors!(nodes[1], 1);
1589
1590         // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1591         // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1592         // appropriate HTLC acceptance).
1593         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1594         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1595         check_added_monitors!(nodes[1], 1);
1596         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1597
1598         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1599         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1600         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1601         check_added_monitors!(nodes[1], 0);
1602
1603         expect_pending_htlcs_forwardable!(nodes[1]);
1604         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1605
1606         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1607         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1608         check_added_monitors!(nodes[0], 1);
1609         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1610         check_added_monitors!(nodes[0], 1);
1611
1612         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1613         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1614         check_added_monitors!(nodes[1], 1);
1615
1616         expect_pending_htlcs_forwardable!(nodes[1]);
1617         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1618
1619         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1620         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1621 }
1622
1623 #[test]
1624 fn test_monitor_update_fail_claim() {
1625         // Basic test for monitor update failures when processing claim_funds calls.
1626         // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1627         // update to claim the payment. We then send two payments C->B->A, which are held at B.
1628         // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1629         // the payments from C onwards to A.
1630         let chanmon_cfgs = create_chanmon_cfgs(3);
1631         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1632         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1633         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1634         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1635         create_announced_chan_between_nodes(&nodes, 1, 2);
1636
1637         // Rebalance a bit so that we can send backwards from 3 to 2.
1638         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1639
1640         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1641
1642         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1643         nodes[1].node.claim_funds(payment_preimage_1);
1644         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1645         check_added_monitors!(nodes[1], 1);
1646
1647         // Note that at this point there is a pending commitment transaction update for A being held by
1648         // B. Even when we go to send the payment from C through B to A, B will not update this
1649         // already-signed commitment transaction and will instead wait for it to resolve before
1650         // forwarding the payment onwards.
1651
1652         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1653         {
1654                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1655                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1656                 check_added_monitors!(nodes[2], 1);
1657         }
1658
1659         // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1660         // paused, so forward shouldn't succeed until we call channel_monitor_updated().
1661         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1662
1663         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1664         assert_eq!(events.len(), 1);
1665         let payment_event = SendEvent::from_event(events.pop().unwrap());
1666         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1667         let events = nodes[1].node.get_and_clear_pending_msg_events();
1668         assert_eq!(events.len(), 0);
1669         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1670         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1671
1672         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1673         nodes[2].node.send_payment_with_route(&route, payment_hash_3,
1674                 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1675         check_added_monitors!(nodes[2], 1);
1676
1677         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1678         assert_eq!(events.len(), 1);
1679         let payment_event = SendEvent::from_event(events.pop().unwrap());
1680         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1681         let events = nodes[1].node.get_and_clear_pending_msg_events();
1682         assert_eq!(events.len(), 0);
1683         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1684
1685         // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1686         let channel_id = chan_1.2;
1687         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1688         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1689         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1690         check_added_monitors!(nodes[1], 0);
1691
1692         let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1693         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1694         commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1695         expect_payment_sent!(nodes[0], payment_preimage_1);
1696
1697         // Get the payment forwards, note that they were batched into one commitment update.
1698         nodes[1].node.process_pending_htlc_forwards();
1699         check_added_monitors!(nodes[1], 1);
1700         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1701         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1702         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1703         commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1704         expect_pending_htlcs_forwardable!(nodes[0]);
1705
1706         let events = nodes[0].node.get_and_clear_pending_events();
1707         assert_eq!(events.len(), 2);
1708         match events[0] {
1709                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
1710                         assert_eq!(payment_hash_2, *payment_hash);
1711                         assert_eq!(1_000_000, amount_msat);
1712                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1713                         assert_eq!(via_channel_id, Some(channel_id));
1714                         assert_eq!(via_user_channel_id, Some(42));
1715                         match &purpose {
1716                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1717                                         assert!(payment_preimage.is_none());
1718                                         assert_eq!(payment_secret_2, *payment_secret);
1719                                 },
1720                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1721                         }
1722                 },
1723                 _ => panic!("Unexpected event"),
1724         }
1725         match events[1] {
1726                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
1727                         assert_eq!(payment_hash_3, *payment_hash);
1728                         assert_eq!(1_000_000, amount_msat);
1729                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1730                         assert_eq!(via_channel_id, Some(channel_id));
1731                         match &purpose {
1732                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1733                                         assert!(payment_preimage.is_none());
1734                                         assert_eq!(payment_secret_3, *payment_secret);
1735                                 },
1736                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1737                         }
1738                 },
1739                 _ => panic!("Unexpected event"),
1740         }
1741 }
1742
1743 #[test]
1744 fn test_monitor_update_on_pending_forwards() {
1745         // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1746         // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1747         // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1748         // from C to A will be pending a forward to A.
1749         let chanmon_cfgs = create_chanmon_cfgs(3);
1750         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1751         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1752         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1753         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1754         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1755
1756         // Rebalance a bit so that we can send backwards from 3 to 1.
1757         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1758
1759         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1760         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1761         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1762         check_added_monitors!(nodes[2], 1);
1763
1764         let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1765         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1766         commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1767         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1768
1769         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1770         {
1771                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1772                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1773                 check_added_monitors!(nodes[2], 1);
1774         }
1775
1776         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1777         assert_eq!(events.len(), 1);
1778         let payment_event = SendEvent::from_event(events.pop().unwrap());
1779         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1780         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
1781
1782         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1783         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1784         check_added_monitors!(nodes[1], 1);
1785
1786         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1787         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1788         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1789         check_added_monitors!(nodes[1], 0);
1790
1791         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1792         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1793         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1794         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1795
1796         let events = nodes[0].node.get_and_clear_pending_events();
1797         assert_eq!(events.len(), 3);
1798         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
1799                 assert_eq!(payment_hash, payment_hash_1);
1800                 assert!(payment_failed_permanently);
1801         } else { panic!("Unexpected event!"); }
1802         match events[2] {
1803                 Event::PaymentFailed { payment_hash, .. } => {
1804                         assert_eq!(payment_hash, payment_hash_1);
1805                 },
1806                 _ => panic!("Unexpected event"),
1807         }
1808         match events[0] {
1809                 Event::PendingHTLCsForwardable { .. } => { },
1810                 _ => panic!("Unexpected event"),
1811         };
1812         nodes[0].node.process_pending_htlc_forwards();
1813         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1814
1815         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1816 }
1817
1818 #[test]
1819 fn monitor_update_claim_fail_no_response() {
1820         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1821         // to channel being AwaitingRAA).
1822         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1823         // code was broken.
1824         let chanmon_cfgs = create_chanmon_cfgs(2);
1825         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1826         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1827         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1828         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1829
1830         // Forward a payment for B to claim
1831         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1832
1833         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1834         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1835         {
1836                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1837                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1838                 check_added_monitors!(nodes[0], 1);
1839         }
1840
1841         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1842         assert_eq!(events.len(), 1);
1843         let payment_event = SendEvent::from_event(events.pop().unwrap());
1844         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1845         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
1846
1847         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1848         nodes[1].node.claim_funds(payment_preimage_1);
1849         check_added_monitors!(nodes[1], 1);
1850
1851         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1852
1853         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1854         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1855         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1856         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1857         check_added_monitors!(nodes[1], 0);
1858         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1859
1860         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1861         check_added_monitors!(nodes[1], 1);
1862         expect_pending_htlcs_forwardable!(nodes[1]);
1863         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1864
1865         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1866         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1867         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1868         expect_payment_sent!(nodes[0], payment_preimage_1);
1869
1870         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1871 }
1872
1873 // restore_b_before_conf has no meaning if !confirm_a_first
1874 // restore_b_before_lock has no meaning if confirm_a_first
1875 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1876         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1877         // the channel setup happily after the update is restored.
1878         let chanmon_cfgs = create_chanmon_cfgs(2);
1879         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1880         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1881         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1882
1883         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1884         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1885         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1886
1887         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1888
1889         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1890         check_added_monitors!(nodes[0], 0);
1891
1892         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1893         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1894         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1895         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1896         check_added_monitors!(nodes[1], 1);
1897
1898         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1899         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1900         check_added_monitors!(nodes[0], 1);
1901         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1902         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1903         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1904         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1905         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1906         check_added_monitors!(nodes[0], 0);
1907         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1908
1909         let events = nodes[0].node.get_and_clear_pending_events();
1910         assert_eq!(events.len(), 0);
1911         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1912         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1913
1914         if confirm_a_first {
1915                 confirm_transaction(&nodes[0], &funding_tx);
1916                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1917                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1918                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1919         } else {
1920                 assert!(!restore_b_before_conf);
1921                 confirm_transaction(&nodes[1], &funding_tx);
1922                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1923         }
1924
1925         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1926         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1927         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1928         reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
1929
1930         // But we want to re-emit ChannelPending
1931         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1932         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1933         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1934
1935         if !restore_b_before_conf {
1936                 confirm_transaction(&nodes[1], &funding_tx);
1937                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1938                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1939         }
1940         if !confirm_a_first && !restore_b_before_lock {
1941                 confirm_transaction(&nodes[0], &funding_tx);
1942                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1943                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1944                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1945         }
1946
1947         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1948         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1949         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1950         check_added_monitors!(nodes[1], 0);
1951
1952         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1953                 if !restore_b_before_lock {
1954                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1955                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1956                 } else {
1957                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1958                         confirm_transaction(&nodes[0], &funding_tx);
1959                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1960                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1961                 }
1962         } else {
1963                 if restore_b_before_conf {
1964                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1965                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1966                         confirm_transaction(&nodes[1], &funding_tx);
1967                 }
1968                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1969                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1970         };
1971         for node in nodes.iter() {
1972                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1973                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1974                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1975         }
1976
1977         if !restore_b_before_lock {
1978                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1979         } else {
1980                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1981         }
1982
1983
1984         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1985         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1986         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1987         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1988 }
1989
1990 #[test]
1991 fn during_funding_monitor_fail() {
1992         do_during_funding_monitor_fail(true, true, false);
1993         do_during_funding_monitor_fail(true, false, false);
1994         do_during_funding_monitor_fail(false, false, false);
1995         do_during_funding_monitor_fail(false, false, true);
1996 }
1997
1998 #[test]
1999 fn test_path_paused_mpp() {
2000         // Simple test of sending a multi-part payment where one path is currently blocked awaiting
2001         // monitor update
2002         let chanmon_cfgs = create_chanmon_cfgs(4);
2003         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2004         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2005         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2006
2007         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2008         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
2009         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
2010         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
2011
2012         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
2013
2014         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
2015         let path = route.paths[0].clone();
2016         route.paths.push(path);
2017         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
2018         route.paths[0].hops[0].short_channel_id = chan_1_id;
2019         route.paths[0].hops[1].short_channel_id = chan_3_id;
2020         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
2021         route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
2022         route.paths[1].hops[1].short_channel_id = chan_4_id;
2023
2024         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
2025         // (for the path 0 -> 2 -> 3) fails.
2026         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2027         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2028
2029         // Now check that we get the right return value, indicating that the first path succeeded but
2030         // the second got a MonitorUpdateInProgress err. This implies
2031         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
2032         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
2033                 &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
2034         ) {
2035                 assert_eq!(results.len(), 2);
2036                 if let Ok(()) = results[0] {} else { panic!(); }
2037                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2038         } else { panic!(); }
2039         check_added_monitors!(nodes[0], 2);
2040         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2041
2042         // Pass the first HTLC of the payment along to nodes[3].
2043         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2044         assert_eq!(events.len(), 1);
2045         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2046
2047         // And check that, after we successfully update the monitor for chan_2 we can pass the second
2048         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2049         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2050         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2051         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2052         assert_eq!(events.len(), 1);
2053         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2054
2055         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
2056 }
2057
2058 #[test]
2059 fn test_pending_update_fee_ack_on_reconnect() {
2060         // In early versions of our automated fee update patch, nodes did not correctly use the
2061         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2062         // undelivered commitment_signed.
2063         //
2064         // B sends A new HTLC + CS, not delivered
2065         // A sends B update_fee + CS
2066         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2067         // reconnect
2068         // B resends initial CS, using the original fee
2069
2070         let chanmon_cfgs = create_chanmon_cfgs(2);
2071         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2072         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2073         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2074
2075         create_announced_chan_between_nodes(&nodes, 0, 1);
2076         send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
2077
2078         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2079         nodes[1].node.send_payment_with_route(&route, payment_hash,
2080                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2081         check_added_monitors!(nodes[1], 1);
2082         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2083         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2084
2085         {
2086                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2087                 *feerate_lock *= 2;
2088         }
2089         nodes[0].node.timer_tick_occurred();
2090         check_added_monitors!(nodes[0], 1);
2091         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2092         assert!(as_update_fee_msgs.update_fee.is_some());
2093
2094         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2095         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2096         check_added_monitors!(nodes[1], 1);
2097         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2098         // bs_first_raa is not delivered until it is re-generated after reconnect
2099
2100         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2101         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2102
2103         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2104                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2105         }, true).unwrap();
2106         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2107         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2108                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2109         }, false).unwrap();
2110         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2111
2112         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2113         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2114         assert_eq!(bs_resend_msgs.len(), 3);
2115         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2116                 assert_eq!(*updates, bs_initial_send_msgs);
2117         } else { panic!(); }
2118         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2119                 assert_eq!(*msg, bs_first_raa);
2120         } else { panic!(); }
2121         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
2122
2123         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2124         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2125
2126         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2127         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2128         check_added_monitors!(nodes[0], 1);
2129         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2130         check_added_monitors!(nodes[1], 1);
2131         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2132
2133         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2134         check_added_monitors!(nodes[0], 1);
2135         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2136         check_added_monitors!(nodes[1], 1);
2137         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2138
2139         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2140         check_added_monitors!(nodes[0], 1);
2141         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2142         check_added_monitors!(nodes[0], 1);
2143
2144         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2145         check_added_monitors!(nodes[1], 1);
2146
2147         expect_pending_htlcs_forwardable!(nodes[0]);
2148         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2149
2150         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2151 }
2152
2153 #[test]
2154 fn test_fail_htlc_on_broadcast_after_claim() {
2155         // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2156         // channel backwards if we received an HTLC failure after a HTLC fulfillment. Here we test a
2157         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2158         // HTLC was not included in a confirmed commitment transaction.
2159         //
2160         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2161         // channel immediately before commitment occurs. After the commitment transaction reaches
2162         // ANTI_REORG_DELAY confirmations, will will try to fail the HTLC which was already fulfilled.
2163         let chanmon_cfgs = create_chanmon_cfgs(3);
2164         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2165         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2166         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2167
2168         create_announced_chan_between_nodes(&nodes, 0, 1);
2169         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2170
2171         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2172
2173         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2174         assert_eq!(bs_txn.len(), 1);
2175
2176         nodes[2].node.claim_funds(payment_preimage);
2177         check_added_monitors!(nodes[2], 1);
2178         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2179
2180         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2181         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2182         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2183         check_added_monitors!(nodes[1], 1);
2184         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2185
2186         mine_transaction(&nodes[1], &bs_txn[0]);
2187         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2188         check_closed_broadcast!(nodes[1], true);
2189         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2190         check_added_monitors!(nodes[1], 1);
2191         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2192
2193         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2194         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2195         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2196         expect_payment_path_successful!(nodes[0]);
2197 }
2198
2199 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2200         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2201         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2202         // explicitly here.
2203         let chanmon_cfgs = create_chanmon_cfgs(2);
2204         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2205         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2206         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2207
2208         create_announced_chan_between_nodes(&nodes, 0, 1);
2209         send_payment(&nodes[0], &[&nodes[1]], 1000);
2210
2211         {
2212                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2213                 *feerate_lock += 20;
2214         }
2215         nodes[0].node.timer_tick_occurred();
2216         check_added_monitors!(nodes[0], 1);
2217         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2218         assert!(update_msgs.update_fee.is_some());
2219         if deliver_update {
2220                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2221         }
2222
2223         if parallel_updates {
2224                 {
2225                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2226                         *feerate_lock += 20;
2227                 }
2228                 nodes[0].node.timer_tick_occurred();
2229                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2230         }
2231
2232         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2233         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2234
2235         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2236                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2237         }, true).unwrap();
2238         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2239         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2240                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2241         }, false).unwrap();
2242         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2243
2244         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2245         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2246         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2247
2248         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2249         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2250         assert_eq!(as_reconnect_msgs.len(), 2);
2251         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2252         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2253                 { updates } else { panic!(); };
2254         assert!(update_msgs.update_fee.is_some());
2255         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2256         if parallel_updates {
2257                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2258                 check_added_monitors!(nodes[1], 1);
2259                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2260                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2261                 check_added_monitors!(nodes[0], 1);
2262                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2263
2264                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2265                 check_added_monitors!(nodes[0], 1);
2266                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2267
2268                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2269                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2270                 check_added_monitors!(nodes[1], 1);
2271                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2272
2273                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2274                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2275                 check_added_monitors!(nodes[1], 1);
2276
2277                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2278                 check_added_monitors!(nodes[0], 1);
2279
2280                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2281                 check_added_monitors!(nodes[0], 1);
2282                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2283
2284                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2285                 check_added_monitors!(nodes[1], 1);
2286         } else {
2287                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2288         }
2289
2290         send_payment(&nodes[0], &[&nodes[1]], 1000);
2291 }
2292 #[test]
2293 fn update_fee_resend_test() {
2294         do_update_fee_resend_test(false, false);
2295         do_update_fee_resend_test(true, false);
2296         do_update_fee_resend_test(false, true);
2297         do_update_fee_resend_test(true, true);
2298 }
2299
2300 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2301         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2302         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2303         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2304         // which failed in such a case).
2305         let chanmon_cfgs = create_chanmon_cfgs(2);
2306         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2307         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2308         let persister: test_utils::TestPersister;
2309         let new_chain_monitor: test_utils::TestChainMonitor;
2310         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2311         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2312
2313         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2314         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2315         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2316
2317         // Do a really complicated dance to get an HTLC into the holding cell, with
2318         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2319         // attempts to send an HTLC while MonitorUpdateInProgress is set are immediately
2320         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2321         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2322         // flags.
2323         //
2324         // We do this by:
2325         //  a) routing a payment from node B to node A,
2326         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2327         //     putting node A in AwaitingRemoteRevoke,
2328         //  c) sending a second payment from node A to node B, which is immediately placed in the
2329         //     holding cell,
2330         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2331         //     when we try to persist the payment preimage,
2332         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2333         //     clearing AwaitingRemoteRevoke on node A.
2334         //
2335         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2336         // (c) will not be freed from the holding cell.
2337         let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2338
2339         nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2340                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2341         check_added_monitors!(nodes[0], 1);
2342         let send = SendEvent::from_node(&nodes[0]);
2343         assert_eq!(send.msgs.len(), 1);
2344
2345         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2346                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2347         check_added_monitors!(nodes[0], 0);
2348
2349         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2350         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2351         nodes[0].node.claim_funds(payment_preimage_0);
2352         check_added_monitors!(nodes[0], 1);
2353
2354         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2355         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2356         check_added_monitors!(nodes[1], 1);
2357
2358         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2359
2360         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2361         check_added_monitors!(nodes[0], 1);
2362
2363         if disconnect {
2364                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2365                 // disconnect the peers. Note that the fuzzer originally found this issue because
2366                 // deserializing a ChannelManager in this state causes an assertion failure.
2367                 if reload_a {
2368                         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2369                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2370                 } else {
2371                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2372                 }
2373                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2374
2375                 // Now reconnect the two
2376                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2377                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2378                 }, true).unwrap();
2379                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2380                 assert_eq!(reestablish_1.len(), 1);
2381                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2382                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2383                 }, false).unwrap();
2384                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2385                 assert_eq!(reestablish_2.len(), 1);
2386
2387                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2388                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2389                 check_added_monitors!(nodes[1], 0);
2390
2391                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2392                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2393
2394                 assert!(resp_0.0.is_none());
2395                 assert!(resp_0.1.is_none());
2396                 assert!(resp_0.2.is_none());
2397                 assert!(resp_1.0.is_none());
2398                 assert!(resp_1.1.is_none());
2399
2400                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2401                 // moment).
2402                 if let Some(pending_cs) = resp_1.2 {
2403                         assert!(pending_cs.update_add_htlcs.is_empty());
2404                         assert!(pending_cs.update_fail_htlcs.is_empty());
2405                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2406                         assert_eq!(pending_cs.commitment_signed, cs);
2407                 } else { panic!(); }
2408
2409                 // There should be no monitor updates as we are still pending awaiting a failed one.
2410                 check_added_monitors!(nodes[0], 0);
2411                 check_added_monitors!(nodes[1], 0);
2412         }
2413
2414         // If we finish updating the monitor, we should free the holding cell right away (this did
2415         // not occur prior to #756).
2416         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2417         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2418         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2419         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2420
2421         // New outbound messages should be generated immediately upon a call to
2422         // get_and_clear_pending_msg_events (but not before).
2423         check_added_monitors!(nodes[0], 0);
2424         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2425         check_added_monitors!(nodes[0], 1);
2426         assert_eq!(events.len(), 1);
2427
2428         // Deliver the pending in-flight CS
2429         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2430         check_added_monitors!(nodes[0], 1);
2431
2432         let commitment_msg = match events.pop().unwrap() {
2433                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2434                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2435                         assert!(updates.update_fail_htlcs.is_empty());
2436                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2437                         assert!(updates.update_fee.is_none());
2438                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2439                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2440                         expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
2441                         assert_eq!(updates.update_add_htlcs.len(), 1);
2442                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2443                         updates.commitment_signed
2444                 },
2445                 _ => panic!("Unexpected event type!"),
2446         };
2447
2448         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2449         check_added_monitors!(nodes[1], 1);
2450
2451         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2452         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2453         expect_pending_htlcs_forwardable!(nodes[1]);
2454         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2455         check_added_monitors!(nodes[1], 1);
2456
2457         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
2458
2459         let events = nodes[1].node.get_and_clear_pending_events();
2460         assert_eq!(events.len(), 2);
2461         match events[0] {
2462                 Event::PendingHTLCsForwardable { .. } => { },
2463                 _ => panic!("Unexpected event"),
2464         };
2465         match events[1] {
2466                 Event::PaymentPathSuccessful { .. } => { },
2467                 _ => panic!("Unexpected event"),
2468         };
2469
2470         nodes[1].node.process_pending_htlc_forwards();
2471         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2472
2473         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2474         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2475 }
2476 #[test]
2477 fn channel_holding_cell_serialize() {
2478         do_channel_holding_cell_serialize(true, true);
2479         do_channel_holding_cell_serialize(true, false);
2480         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2481 }
2482
2483 #[derive(PartialEq)]
2484 enum HTLCStatusAtDupClaim {
2485         Received,
2486         HoldingCell,
2487         Cleared,
2488 }
2489 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2490         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2491         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2492         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2493         // reconnects, and then has to re-send its update_fulfill_htlc message again.
2494         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2495         // channel on which the inbound HTLC was received.
2496         let chanmon_cfgs = create_chanmon_cfgs(3);
2497         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2498         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2499         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2500
2501         create_announced_chan_between_nodes(&nodes, 0, 1);
2502         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2503
2504         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2505
2506         let mut as_raa = None;
2507         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2508                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2509                 // awaiting a remote revoke_and_ack from nodes[0].
2510                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2511                 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2512                         RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2513                 check_added_monitors!(nodes[0], 1);
2514
2515                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2516                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2517                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2518                 check_added_monitors!(nodes[1], 1);
2519
2520                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2521                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2522                 check_added_monitors!(nodes[0], 1);
2523                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2524                 check_added_monitors!(nodes[0], 1);
2525
2526                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2527         }
2528
2529         let fulfill_msg = msgs::UpdateFulfillHTLC {
2530                 channel_id: chan_id_2,
2531                 htlc_id: 0,
2532                 payment_preimage,
2533         };
2534         if second_fails {
2535                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2536                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2537                 check_added_monitors!(nodes[2], 1);
2538                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2539         } else {
2540                 nodes[2].node.claim_funds(payment_preimage);
2541                 check_added_monitors!(nodes[2], 1);
2542                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2543
2544                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2545                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2546                 // Check that the message we're about to deliver matches the one generated:
2547                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2548         }
2549         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2550         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2551         check_added_monitors!(nodes[1], 1);
2552
2553         let mut bs_updates = None;
2554         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2555                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2556                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2557                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2558                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2559                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2560                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2561                         expect_payment_path_successful!(nodes[0]);
2562                 }
2563         } else {
2564                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2565         }
2566
2567         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2568         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2569
2570         if second_fails {
2571                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
2572                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2573         } else {
2574                 reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
2575         }
2576
2577         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2578                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2579                 check_added_monitors!(nodes[1], 1);
2580                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2581
2582                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2583                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2584                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2585                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2586         }
2587         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2588                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2589                 expect_payment_path_successful!(nodes[0]);
2590         }
2591 }
2592
2593 #[test]
2594 fn test_reconnect_dup_htlc_claims() {
2595         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2596         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2597         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2598         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2599         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2600         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2601 }
2602
2603 #[test]
2604 fn test_temporary_error_during_shutdown() {
2605         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2606         // close.
2607         let mut config = test_default_channel_config();
2608         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2609
2610         let chanmon_cfgs = create_chanmon_cfgs(2);
2611         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2612         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2613         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2614
2615         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2616
2617         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2618         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2619
2620         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2621         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2622         check_added_monitors!(nodes[1], 1);
2623
2624         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2625         check_added_monitors!(nodes[0], 1);
2626
2627         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2628
2629         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2630         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2631
2632         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2633         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2634         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2635
2636         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2637
2638         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2639         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2640         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2641
2642         nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
2643         let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
2644         let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2645
2646         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
2647         let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
2648         assert!(none_b.is_none());
2649         let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2650
2651         assert_eq!(txn_a, txn_b);
2652         assert_eq!(txn_a.len(), 1);
2653         check_spends!(txn_a[0], funding_tx);
2654         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
2655         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
2656 }
2657
2658 #[test]
2659 fn test_permanent_error_during_sending_shutdown() {
2660         // Test that permanent failures when updating the monitor's shutdown script result in a force
2661         // close when initiating a cooperative close.
2662         let mut config = test_default_channel_config();
2663         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2664
2665         let chanmon_cfgs = create_chanmon_cfgs(2);
2666         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2667         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
2668         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2669
2670         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
2671         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
2672
2673         assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
2674
2675         // We always send the `shutdown` response when initiating a shutdown, even if we immediately
2676         // close the channel thereafter.
2677         let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
2678         assert_eq!(msg_events.len(), 3);
2679         if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2680         if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2681         if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
2682
2683         check_added_monitors!(nodes[0], 2);
2684         check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
2685 }
2686
2687 #[test]
2688 fn test_permanent_error_during_handling_shutdown() {
2689         // Test that permanent failures when updating the monitor's shutdown script result in a force
2690         // close when handling a cooperative close.
2691         let mut config = test_default_channel_config();
2692         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2693
2694         let chanmon_cfgs = create_chanmon_cfgs(2);
2695         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2696         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
2697         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2698
2699         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
2700         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
2701
2702         assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
2703         let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
2704         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
2705
2706         // We always send the `shutdown` response when receiving a shutdown, even if we immediately
2707         // close the channel thereafter.
2708         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2709         assert_eq!(msg_events.len(), 3);
2710         if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2711         if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2712         if let MessageSendEvent::HandleError { .. } =  msg_events[2] {} else { panic!(); }
2713
2714         check_added_monitors!(nodes[1], 2);
2715         check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
2716 }
2717
2718 #[test]
2719 fn double_temp_error() {
2720         // Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
2721         let chanmon_cfgs = create_chanmon_cfgs(2);
2722         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2723         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2724         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2725
2726         let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
2727
2728         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2729         let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
2730
2731         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2732         // `claim_funds` results in a ChannelMonitorUpdate.
2733         nodes[1].node.claim_funds(payment_preimage_1);
2734         check_added_monitors!(nodes[1], 1);
2735         let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2736
2737         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2738         // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
2739         // which had some asserts that prevented it from being called twice.
2740         nodes[1].node.claim_funds(payment_preimage_2);
2741         check_added_monitors!(nodes[1], 1);
2742         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2743
2744         let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2745         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
2746         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2747         check_added_monitors!(nodes[1], 0);
2748         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);
2749
2750         // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
2751         // and get both PaymentClaimed events at once.
2752         let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2753
2754         let events = nodes[1].node.get_and_clear_pending_events();
2755         assert_eq!(events.len(), 2);
2756         match events[0] {
2757                 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
2758                 _ => panic!("Unexpected Event: {:?}", events[0]),
2759         }
2760         match events[1] {
2761                 Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
2762                 _ => panic!("Unexpected Event: {:?}", events[1]),
2763         }
2764
2765         assert_eq!(msg_events.len(), 1);
2766         let (update_fulfill_1, commitment_signed_b1, node_id) = {
2767                 match &msg_events[0] {
2768                         &MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
2769                                 assert!(update_add_htlcs.is_empty());
2770                                 assert_eq!(update_fulfill_htlcs.len(), 1);
2771                                 assert!(update_fail_htlcs.is_empty());
2772                                 assert!(update_fail_malformed_htlcs.is_empty());
2773                                 assert!(update_fee.is_none());
2774                                 (update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
2775                         },
2776                         _ => panic!("Unexpected event"),
2777                 }
2778         };
2779         assert_eq!(node_id, nodes[0].node.get_our_node_id());
2780         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
2781         check_added_monitors!(nodes[0], 0);
2782         expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
2783         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
2784         check_added_monitors!(nodes[0], 1);
2785         nodes[0].node.process_pending_htlc_forwards();
2786         let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2787         check_added_monitors!(nodes[1], 0);
2788         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2789         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
2790         check_added_monitors!(nodes[1], 1);
2791         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
2792         check_added_monitors!(nodes[1], 1);
2793
2794         // Complete the second HTLC.
2795         let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
2796                 let events = nodes[1].node.get_and_clear_pending_msg_events();
2797                 assert_eq!(events.len(), 2);
2798                 (match &events[0] {
2799                         MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2800                                 assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2801                                 assert!(updates.update_add_htlcs.is_empty());
2802                                 assert!(updates.update_fail_htlcs.is_empty());
2803                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
2804                                 assert!(updates.update_fee.is_none());
2805                                 assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2806                                 (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
2807                         },
2808                         _ => panic!("Unexpected event"),
2809                 },
2810                  match events[1] {
2811                          MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
2812                                  assert_eq!(*node_id, nodes[0].node.get_our_node_id());
2813                                  (*msg).clone()
2814                          },
2815                          _ => panic!("Unexpected event"),
2816                  })
2817         };
2818         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
2819         check_added_monitors!(nodes[0], 1);
2820         expect_payment_path_successful!(nodes[0]);
2821
2822         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
2823         check_added_monitors!(nodes[0], 0);
2824         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2825         commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
2826         expect_payment_sent!(nodes[0], payment_preimage_2);
2827 }
2828
2829 fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
2830         // Test that if the monitor update generated in funding_signed is stored async and we restart
2831         // with the latest ChannelManager but the ChannelMonitor persistence never completed we happily
2832         // drop the channel and move on.
2833         let chanmon_cfgs = create_chanmon_cfgs(2);
2834         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2835
2836         let persister: test_utils::TestPersister;
2837         let new_chain_monitor: test_utils::TestChainMonitor;
2838         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2839
2840         let mut chan_config = test_default_channel_config();
2841         chan_config.manually_accept_inbound_channels = true;
2842         chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2843
2844         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2845         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2846
2847         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
2848         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2849
2850         let events = nodes[1].node.get_and_clear_pending_events();
2851         assert_eq!(events.len(), 1);
2852         match events[0] {
2853                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2854                         if use_0conf {
2855                                 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2856                         } else {
2857                                 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2858                         }
2859                 },
2860                 _ => panic!("Unexpected event"),
2861         };
2862
2863         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2864
2865         let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2866
2867         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2868         check_added_monitors!(nodes[0], 0);
2869
2870         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2871         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2872         check_added_monitors!(nodes[1], 1);
2873         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
2874
2875         let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
2876         assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
2877         match &bs_signed_locked[0] {
2878                 MessageSendEvent::SendFundingSigned { msg, .. } => {
2879                         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2880
2881                         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
2882                         check_added_monitors!(nodes[0], 1);
2883                 }
2884                 _ => panic!("Unexpected event"),
2885         }
2886         if use_0conf {
2887                 match &bs_signed_locked[1] {
2888                         MessageSendEvent::SendChannelReady { msg, .. } => {
2889                                 nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
2890                         }
2891                         _ => panic!("Unexpected event"),
2892                 }
2893         }
2894
2895         assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
2896         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2897         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
2898
2899         // nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
2900         // broadcast the funding transaction. If nodes[0] restarts at this point with the
2901         // ChannelMonitor lost, we should simply discard the channel.
2902
2903         // The test framework checks that watched_txn/outputs match the monitor set, which they will
2904         // not, so we have to clear them here.
2905         nodes[0].chain_source.watched_txn.lock().unwrap().clear();
2906         nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
2907
2908         reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
2909         check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
2910         assert!(nodes[0].node.list_channels().is_empty());
2911 }
2912
2913 #[test]
2914 fn test_outbound_reload_without_init_mon() {
2915         do_test_outbound_reload_without_init_mon(true);
2916         do_test_outbound_reload_without_init_mon(false);
2917 }
2918
2919 fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
2920         // Test that if the monitor update generated by funding_transaction_generated is stored async
2921         // and we restart with the latest ChannelManager but the ChannelMonitor persistence never
2922         // completed we happily drop the channel and move on.
2923         let chanmon_cfgs = create_chanmon_cfgs(2);
2924         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2925
2926         let persister: test_utils::TestPersister;
2927         let new_chain_monitor: test_utils::TestChainMonitor;
2928         let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2929
2930         let mut chan_config = test_default_channel_config();
2931         chan_config.manually_accept_inbound_channels = true;
2932         chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
2933
2934         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
2935         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2936
2937         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
2938         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
2939
2940         let events = nodes[1].node.get_and_clear_pending_events();
2941         assert_eq!(events.len(), 1);
2942         match events[0] {
2943                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
2944                         if use_0conf {
2945                                 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2946                         } else {
2947                                 nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
2948                         }
2949                 },
2950                 _ => panic!("Unexpected event"),
2951         };
2952
2953         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
2954
2955         let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
2956
2957         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
2958         check_added_monitors!(nodes[0], 0);
2959
2960         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
2961         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2962         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
2963         check_added_monitors!(nodes[1], 1);
2964
2965         // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the
2966         // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
2967         // transaction is confirmed.
2968         let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
2969
2970         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
2971         check_added_monitors!(nodes[0], 1);
2972         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
2973
2974         let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
2975         if lock_commitment {
2976                 confirm_transaction(&nodes[0], &as_funding_tx[0]);
2977                 confirm_transaction(&nodes[1], &as_funding_tx[0]);
2978         }
2979         if use_0conf || lock_commitment {
2980                 let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
2981                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
2982         }
2983         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2984
2985         // nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
2986         // move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
2987         // restarts at this point with the ChannelMonitor lost, we should simply discard the channel.
2988
2989         // The test framework checks that watched_txn/outputs match the monitor set, which they will
2990         // not, so we have to clear them here.
2991         nodes[1].chain_source.watched_txn.lock().unwrap().clear();
2992         nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
2993
2994         reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
2995
2996         check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
2997         assert!(nodes[1].node.list_channels().is_empty());
2998 }
2999
3000 #[test]
3001 fn test_inbound_reload_without_init_mon() {
3002         do_test_inbound_reload_without_init_mon(true, true);
3003         do_test_inbound_reload_without_init_mon(true, false);
3004         do_test_inbound_reload_without_init_mon(false, true);
3005         do_test_inbound_reload_without_init_mon(false, false);
3006 }