Handle receiving custom HTLC TLVs
[rust-lightning] lightning/src/ln/chanmon_update_fail_tests.rs
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.

use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{ChannelManager, RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;

use crate::ln::functional_test_utils::*;

use crate::util::test_utils;

use crate::io;
use bitcoin::hashes::Hash;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};

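// Most tests below drive failures through the TestPersister: queue up the
// ChannelMonitorUpdateStatus it should return, poke the node while the update is pending,
// then release things. A rough sketch of the recurring pattern (using the same helpers this
// file already uses):
//
//   chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
//   // ... drive messages; the channel is frozen while the update is in flight ...
//   chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
//   nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
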
#[test]
fn test_simple_monitor_permanent_update_fail() {
	// Test that we handle a simple permanent monitor update failure
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
		), true, APIError::ChannelUnavailable {..}, {});
	check_added_monitors!(nodes[0], 2);

	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_1.len(), 2);
	match events_1[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	};
	match events_1[1] {
		MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
		_ => panic!("Unexpected event"),
	};

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}
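
// An illustrative sketch, not used by the tests: one plausible way a user persister might
// map storage results onto the three ChannelMonitorUpdateStatus variants exercised in this
// file. The helper name and its `retryable` parameter are hypothetical; only the enum and
// its semantics (Completed is normal, InProgress freezes the channel until the update
// completes, PermanentFailure force-closes it) come from the tests themselves.
#[allow(dead_code)]
fn example_status_for_persist_result(res: Result<(), io::Error>, retryable: bool) -> ChannelMonitorUpdateStatus {
	match res {
		// Update durably persisted: normal operation continues.
		Ok(()) => ChannelMonitorUpdateStatus::Completed,
		// Transient failure: report InProgress and complete the update later, e.g. via
		// force_channel_monitor_updated as the tests here do.
		Err(_) if retryable => ChannelMonitorUpdateStatus::InProgress,
		// Unrecoverable failure: the channel will be force-closed, as
		// test_simple_monitor_permanent_update_fail demonstrates.
		Err(_) => ChannelMonitorUpdateStatus::PermanentFailure,
	}
}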

#[test]
fn test_monitor_and_persister_update_fail() {
	// Test that if both updating the `ChannelMonitor` and persisting the updated
	// `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
	// is the one that gets returned.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Create some initial channel
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
	let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };

	// Rebalance the network to generate HTLCs in both directions
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

	// Route an HTLC from node 0 to node 1 (but don't settle)
	let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

	// Make a copy of the ChainMonitor so we can capture the error it returns on a
	// bogus update. Note that if we instead updated nodes[0]'s ChainMonitor
	// directly, the node would fail to be `Drop`'d at the end because its
	// ChannelManager and ChainMonitor would be out of sync.
	let chain_source = test_utils::TestChainSource::new(Network::Testnet);
	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
	let persister = test_utils::TestPersister::new();
	let tx_broadcaster = TestBroadcaster {
		txn_broadcasted: Mutex::new(Vec::new()),
		// Because we will connect a block at height 200 below, we need the TestBroadcaster to know
		// that we are at height 200 so that it doesn't think we're violating the time lock
		// requirements of transactions broadcasted at that point.
		blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
	};
	let chain_mon = {
		let new_monitor = {
			let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
			let new_monitor = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
				&mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
			assert!(new_monitor == *monitor);
			new_monitor
		};
		let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
		assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
		chain_mon
	};
	chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);

	// Set the persister's return value to be an InProgress.
	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	// Try to update ChannelMonitor
	nodes[1].node.claim_funds(preimage);
	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
	check_added_monitors!(nodes[1], 1);

	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
			// Check that even though the persister is returning an InProgress,
			// because the update is bogus, ultimately the error that's returned
			// should be a PermanentFailure.
			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
			logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
		} else { assert!(false); }
	}

	check_added_monitors!(nodes[0], 1);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
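	// (The single pending event is the PaymentSent generated when nodes[0] handled the
	// update_fulfill_htlc above.)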
}

fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
	// Test that we can recover from a simple temporary monitor update failure optionally with
	// a disconnect in between
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	{
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
				RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
		reconnect_args.send_channel_ready = (true, true);
		reconnect_nodes(reconnect_args);
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let payment_event = SendEvent::from_event(events_2.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_3 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_3.len(), 1);
	match events_3[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_1, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_1, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);

	// Now set it to failed again...
	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	if disconnect {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
	}

	// ...and make sure we can force-close a frozen channel
	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_broadcast!(nodes[0], true);

	// TODO: Once we hit the chain with the failure transaction we should check that we get a
	// PaymentPathFailed event

	assert_eq!(nodes[0].node.list_channels().len(), 0);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
}

#[test]
fn test_simple_monitor_temporary_update_fail() {
	do_test_simple_monitor_temporary_update_fail(false);
	do_test_simple_monitor_temporary_update_fail(true);
}

fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
	let disconnect_flags = 8 | 16;

	// Test that we can recover from a temporary monitor update failure with some in-flight
	// HTLCs going on at the same time, potentially with some disconnection thrown in.
	// * First we route a payment, then get a temporary monitor update failure when trying to
	//   route a second payment. We then claim the first payment.
	// * If disconnect_count is set, we will disconnect at this point (which is likely, as an
	//   InProgress status likely indicates a network disconnect which resulted in failing to
	//   update the ChannelMonitor on a watchtower).
	// * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
	//   immediately, otherwise we wait for the disconnect and deliver them via the reconnect's
	//   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
	//   disconnect_count & !disconnect_flags is 0).
	// * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
	//   through message sending, potentially disconnecting/reconnecting multiple times based on
	//   disconnect_count, to get the update_fulfill_htlc through.
	// * We then walk through more message exchanges to get the original update_add_htlc
	//   through, swapping message ordering based on disconnect_count & 8 and optionally
	//   disconnecting/reconnecting based on disconnect_count.
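	// As a reading aid, the flag decoding used throughout the test:
	//   disconnect_count & 8  -> swap the RAA/commitment_signed handling order below
	//   disconnect_count & 16 -> deliver the initial fulfill/CS only via reconnect
	//   disconnect_count & !disconnect_flags -> how many disconnect/reconnect cycles to run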
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Now try to send a second payment which will fail to send
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
		unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
				RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
			), false, APIError::MonitorUpdateInProgress, {});
		check_added_monitors!(nodes[0], 1);
	}

	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert_eq!(nodes[0].node.list_channels().len(), 1);

	// Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
	// but nodes[0] won't respond since it is frozen.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events_2.len(), 1);
	let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			assert!(update_add_htlcs.is_empty());
			assert_eq!(update_fulfill_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());

			if (disconnect_count & 16) == 0 {
				nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
				let events_3 = nodes[0].node.get_and_clear_pending_events();
				assert_eq!(events_3.len(), 1);
				match events_3[0] {
					Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
						assert_eq!(*payment_preimage, payment_preimage_1);
						assert_eq!(*payment_hash, payment_hash_1);
					},
					_ => panic!("Unexpected event"),
				}

				nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
				check_added_monitors!(nodes[0], 1);
				assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			}

			(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};

	if disconnect_count & !disconnect_flags > 0 {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	}

	// Now fix monitor updating...
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	macro_rules! disconnect_reconnect_peers { () => { {
		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		(reestablish_1, reestablish_2, as_resp, bs_resp)
	} } }
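
	// Note on the tuples used below: handle_chan_reestablish_msgs! yields (roughly)
	// (channel_ready, revoke_and_ack, commitment_update, RAACommitmentOrder), so `.1` is an
	// optional RAA, `.2` an optional commitment update, and `.3` the order in which the
	// assertions below expect them to be handled.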

	let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

		nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
		assert_eq!(reestablish_1.len(), 1);
		nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
		assert_eq!(reestablish_2.len(), 1);

		nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
		check_added_monitors!(nodes[0], 0);
		let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
		nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
		check_added_monitors!(nodes[1], 0);
		let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

		assert!(as_resp.0.is_none());
		assert!(bs_resp.0.is_none());

		assert!(bs_resp.1.is_none());
		if (disconnect_count & 16) == 0 {
			assert!(bs_resp.2.is_none());

			assert!(as_resp.1.is_some());
			assert!(as_resp.2.is_some());
			assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
		} else {
			assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
			assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
			assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
			assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);

			assert!(as_resp.1.is_none());

			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
			let events_3 = nodes[0].node.get_and_clear_pending_events();
			assert_eq!(events_3.len(), 1);
			match events_3[0] {
				Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
					assert_eq!(*payment_preimage, payment_preimage_1);
					assert_eq!(*payment_hash, payment_hash_1);
				},
				_ => panic!("Unexpected event"),
			}

			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
			let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			// No commitment_signed so get_event_msg's assert(len == 1) passes
			check_added_monitors!(nodes[0], 1);

			as_resp.1 = Some(as_resp_raa);
			bs_resp.2 = None;
		}

		if disconnect_count & !disconnect_flags > 1 {
			let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();

			if (disconnect_count & 16) == 0 {
				assert!(reestablish_1 == second_reestablish_1);
				assert!(reestablish_2 == second_reestablish_2);
			}
			assert!(as_resp == second_as_resp);
			assert!(bs_resp == second_bs_resp);
		}

		(SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
	} else {
		let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events_4.len(), 2);
		(SendEvent::from_event(events_4.remove(0)), match events_4[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				msg.clone()
			},
			_ => panic!("Unexpected event"),
		})
	};

	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	if disconnect_count & !disconnect_flags > 2 {
		let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

		assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
		assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

		assert!(as_resp.2.is_none());
		assert!(bs_resp.2.is_none());
	}

	let as_commitment_update;
	let bs_second_commitment_update;

	macro_rules! handle_bs_raa { () => {
		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
		as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		assert!(as_commitment_update.update_add_htlcs.is_empty());
		assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_htlcs.is_empty());
		assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(as_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[0], 1);
	} }

	macro_rules! handle_initial_raa { () => {
		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
		bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
		assert!(bs_second_commitment_update.update_fee.is_none());
		check_added_monitors!(nodes[1], 1);
	} }

	if (disconnect_count & 8) == 0 {
		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.is_none());

			assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	} else {
		handle_initial_raa!();

		if disconnect_count & !disconnect_flags > 3 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

			assert!(as_resp.2.is_none());
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);

			assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
		}

		handle_bs_raa!();

		if disconnect_count & !disconnect_flags > 4 {
			let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

			assert!(as_resp.1.is_none());
			assert!(bs_resp.1.is_none());

			assert!(as_resp.2.unwrap() == as_commitment_update);
			assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
		}
	}

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
	let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	// No commitment_signed so get_event_msg's assert(len == 1) passes
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events_5 = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events_5.len(), 1);
	match events_5[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_temporary_update_fail_a() {
	do_test_monitor_temporary_update_fail(0);
	do_test_monitor_temporary_update_fail(1);
	do_test_monitor_temporary_update_fail(2);
	do_test_monitor_temporary_update_fail(3);
	do_test_monitor_temporary_update_fail(4);
	do_test_monitor_temporary_update_fail(5);
}

#[test]
fn test_monitor_temporary_update_fail_b() {
	do_test_monitor_temporary_update_fail(2 | 8);
	do_test_monitor_temporary_update_fail(3 | 8);
	do_test_monitor_temporary_update_fail(4 | 8);
	do_test_monitor_temporary_update_fail(5 | 8);
}

#[test]
fn test_monitor_temporary_update_fail_c() {
	do_test_monitor_temporary_update_fail(1 | 16);
	do_test_monitor_temporary_update_fail(2 | 16);
	do_test_monitor_temporary_update_fail(3 | 16);
	do_test_monitor_temporary_update_fail(2 | 8 | 16);
	do_test_monitor_temporary_update_fail(3 | 8 | 16);
}
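
// The three runners above cover the flag matrix: plain reconnect counts (_a), counts with
// the RAA/commitment_signed handling order swapped (_b, | 8), and counts where the initial
// fulfill/CS is only delivered via channel_reestablish (_c, | 16, which per the comment in
// do_test_monitor_temporary_update_fail requires a nonzero reconnect count).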

#[test]
fn test_monitor_update_fail_cs() {
	// Tests handling of a monitor update failure when processing an incoming commitment_signed
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let responses = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(responses.len(), 2);

	match responses[0] {
		MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}
	match responses[1] {
		MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert!(updates.update_fail_htlcs.is_empty());
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());

			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
			check_added_monitors!(nodes[0], 1);
			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
		},
		_ => panic!("Unexpected event"),
	}

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
			assert_eq!(amount_msat, 1_000_000);
			assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(our_payment_secret, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	};

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}

#[test]
fn test_monitor_update_fail_no_rebroadcast() {
	// Tests handling of a monitor update failure when no message rebroadcasting on
	// channel_monitor_updated() is required. Backported from chanmon_fail_consistency
	// fuzz tests.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable!(nodes[1]);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentClaimable { payment_hash, .. } => {
			assert_eq!(payment_hash, our_payment_hash);
		},
		_ => panic!("Unexpected event"),
	}

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn test_monitor_update_raa_while_paused() {
	// Tests handling of an RAA while monitor updating has already been marked failed.
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	send_payment(&nodes[0], &[&nodes[1]], 5000000);
	let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
			RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

	let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
	{
		nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
			RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[1], 1);
	}
	let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
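	// (set_update_ret is called twice to queue an InProgress result for each of the two
	// monitor updates that follow: one for the commitment_signed just below and one for the
	// RAA after it.)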
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[0], 1);

	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[0], 0);

	let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
	check_added_monitors!(nodes[1], 1);
	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
	check_added_monitors!(nodes[1], 1);
	let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[0], 1);
	expect_pending_htlcs_forwardable!(nodes[0]);
	expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}

fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
	// Tests handling of a monitor update failure when processing an incoming RAA
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Rebalance a bit so that we can send backwards from 2 to 1.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	// Route a first payment that we'll fail backwards
	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);

	// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
	nodes[2].node.fail_htlc_backwards(&payment_hash_1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
	check_added_monitors!(nodes[2], 1);

	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);

	let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
	check_added_monitors!(nodes[0], 0);

	// While the second channel is AwaitingRAA, forward a second payment to get it into the
	// holding cell.
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Now fail monitor updating.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Forward a third payment which will also be added to the holding cell, despite the channel
	// being paused waiting on a monitor update.
	let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
	send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
	check_added_monitors!(nodes[1], 0);

	// Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
	// and not forwarded.
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
		let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
		nodes[2].node.send_payment_with_route(&route, payment_hash_4,
			RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
		check_added_monitors!(nodes[2], 1);

		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		(Some(payment_preimage_4), Some(payment_hash_4))
	} else { (None, None) };

936         // Restore monitor updating, ensuring we immediately get a fail-back update and an
937         // update_add update.
938         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
939         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
940         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
941         check_added_monitors!(nodes[1], 0);
942         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
943         check_added_monitors!(nodes[1], 1);
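        // force_channel_monitor_updated is a test-only shortcut; in production the async
        // persister itself signals completion, roughly (assumed call shape):
        //
        //     chain_monitor.channel_monitor_updated(funding_txo, completed_update_id)?;
        //
        // after which the held fail-back and forward are released, as asserted below.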
944
945         let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
946         if test_ignore_second_cs {
947                 assert_eq!(events_3.len(), 3);
948         } else {
949                 assert_eq!(events_3.len(), 2);
950         }
951
952         // Note that the ordering of events across different nodes is non-prescriptive, though the
953         // two events that both go to nodes[2] have to stay in their relative order.
954         let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
955         let messages_a = match nodes_0_event {
956                 MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
957                         assert_eq!(node_id, nodes[0].node.get_our_node_id());
958                         assert!(updates.update_fulfill_htlcs.is_empty());
959                         assert_eq!(updates.update_fail_htlcs.len(), 1);
960                         assert!(updates.update_fail_malformed_htlcs.is_empty());
961                         assert!(updates.update_add_htlcs.is_empty());
962                         assert!(updates.update_fee.is_none());
963                         (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
964                 },
965                 _ => panic!("Unexpected event type!"),
966         };
967
968         let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
969         let send_event_b = SendEvent::from_event(nodes_2_event);
970         assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
971
972         let raa = if test_ignore_second_cs {
973                 let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
974                 match nodes_2_event {
975                         MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
976                                 assert_eq!(node_id, nodes[2].node.get_our_node_id());
977                                 Some(msg.clone())
978                         },
979                         _ => panic!("Unexpected event"),
980                 }
981         } else { None };
982
983         // Now deliver the new messages...
984
985         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
986         commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
987         expect_payment_failed!(nodes[0], payment_hash_1, true);
988
989         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
990         let as_cs;
991         if test_ignore_second_cs {
992                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
993                 check_added_monitors!(nodes[2], 1);
994                 let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
995                 nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
996                 check_added_monitors!(nodes[2], 1);
997                 let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
998                 assert!(bs_cs.update_add_htlcs.is_empty());
999                 assert!(bs_cs.update_fail_htlcs.is_empty());
1000                 assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
1001                 assert!(bs_cs.update_fulfill_htlcs.is_empty());
1002                 assert!(bs_cs.update_fee.is_none());
1003
1004                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
1005                 check_added_monitors!(nodes[1], 1);
1006                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1007
1008                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
1009                 check_added_monitors!(nodes[1], 1);
1010         } else {
1011                 nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
1012                 check_added_monitors!(nodes[2], 1);
1013
1014                 let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
1015                 // As both messages are for nodes[1], they're in order.
1016                 assert_eq!(bs_revoke_and_commit.len(), 2);
1017                 match bs_revoke_and_commit[0] {
1018                         MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1019                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1020                                 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
1021                                 check_added_monitors!(nodes[1], 1);
1022                         },
1023                         _ => panic!("Unexpected event"),
1024                 }
1025
1026                 as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
1027
1028                 match bs_revoke_and_commit[1] {
1029                         MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1030                                 assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1031                                 assert!(updates.update_add_htlcs.is_empty());
1032                                 assert!(updates.update_fail_htlcs.is_empty());
1033                                 assert!(updates.update_fail_malformed_htlcs.is_empty());
1034                                 assert!(updates.update_fulfill_htlcs.is_empty());
1035                                 assert!(updates.update_fee.is_none());
1036                                 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
1037                                 check_added_monitors!(nodes[1], 1);
1038                         },
1039                         _ => panic!("Unexpected event"),
1040                 }
1041         }
1042
1043         assert_eq!(as_cs.update_add_htlcs.len(), 1);
1044         assert!(as_cs.update_fail_htlcs.is_empty());
1045         assert!(as_cs.update_fail_malformed_htlcs.is_empty());
1046         assert!(as_cs.update_fulfill_htlcs.is_empty());
1047         assert!(as_cs.update_fee.is_none());
1048         let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1049
1051         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
1052         nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
1053         check_added_monitors!(nodes[2], 1);
1054         let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1055
1056         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
1057         check_added_monitors!(nodes[2], 1);
1058         let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1059
1060         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
1061         check_added_monitors!(nodes[1], 1);
1062         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1063
1064         nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
1065         check_added_monitors!(nodes[1], 1);
1066         let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
1067
1068         nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
1069         check_added_monitors!(nodes[2], 1);
1070         assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
1071
1072         expect_pending_htlcs_forwardable!(nodes[2]);
1073
1074         let events_6 = nodes[2].node.get_and_clear_pending_events();
1075         assert_eq!(events_6.len(), 2);
1076         match events_6[0] {
1077                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
1078                 _ => panic!("Unexpected event"),
1079         };
1080         match events_6[1] {
1081                 Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
1082                 _ => panic!("Unexpected event"),
1083         };
1084
1085         if test_ignore_second_cs {
1086                 expect_pending_htlcs_forwardable!(nodes[1]);
1087                 check_added_monitors!(nodes[1], 1);
1088
1089                 send_event = SendEvent::from_node(&nodes[1]);
1090                 assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
1091                 assert_eq!(send_event.msgs.len(), 1);
1092                 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
1093                 commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
1094
1095                 expect_pending_htlcs_forwardable!(nodes[0]);
1096
1097                 let events_9 = nodes[0].node.get_and_clear_pending_events();
1098                 assert_eq!(events_9.len(), 1);
1099                 match events_9[0] {
1100                         Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
1101                         _ => panic!("Unexpected event"),
1102                 };
1103                 claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
1104         }
1105
1106         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
1107 }
1108
1109 #[test]
1110 fn test_monitor_update_fail_raa() {
1111         do_test_monitor_update_fail_raa(false);
1112         do_test_monitor_update_fail_raa(true);
1113 }
1114
1115 #[test]
1116 fn test_monitor_update_fail_reestablish() {
1117         // Simple test of message retransmission after a monitor update failure, where the
1118         // channel_reestablish handling itself generates the monitor update (which comes from freeing
1119         // holding cell HTLCs).
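        // Roughly, the path exercised here: the channel_reestablish handler frees the
        // holding-cell claim, producing a ChannelMonitorUpdate; when persisting that update
        // returns InProgress, the resulting messages are withheld and replayed on completion.
        // A hedged sketch of that shape (not the literal code path):
        //
        //     let update = channel.free_holding_cell_htlcs(/* ... */);
        //     match chain_monitor.update_channel(funding_txo, &update) {
        //         ChannelMonitorUpdateStatus::Completed => { /* send fulfill + CS now */ },
        //         ChannelMonitorUpdateStatus::InProgress => { /* withhold messages */ },
        //         ChannelMonitorUpdateStatus::PermanentFailure => { /* force-close path */ },
        //     }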
1120         let chanmon_cfgs = create_chanmon_cfgs(3);
1121         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1122         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1123         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1124         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1125         create_announced_chan_between_nodes(&nodes, 1, 2);
1126
1127         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
1128
1129         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1130         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1131
1132         nodes[2].node.claim_funds(payment_preimage);
1133         check_added_monitors!(nodes[2], 1);
1134         expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
1135
1136         let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1137         assert!(updates.update_add_htlcs.is_empty());
1138         assert!(updates.update_fail_htlcs.is_empty());
1139         assert!(updates.update_fail_malformed_htlcs.is_empty());
1140         assert!(updates.update_fee.is_none());
1141         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1142         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1143         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
1144         check_added_monitors!(nodes[1], 1);
1145         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1146         commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
1147
1148         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1149         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1150                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1151         }, true).unwrap();
1152         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1153                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1154         }, false).unwrap();
1155
1156         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1157         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1158
1159         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1160
1161         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1162         assert_eq!(
1163                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1164                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1165
1166         nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
1167         check_added_monitors!(nodes[1], 1);
1168
1169         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1170         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1171
1172         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1173                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1174         }, true).unwrap();
1175         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1176                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1177         }, false).unwrap();
1178
1179         assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
1180         assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
1181
1182         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
1183         assert_eq!(
1184                 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
1185                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1186
1187         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
1188         check_added_monitors!(nodes[1], 0);
1189         assert_eq!(
1190                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
1191                         .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
1192
1193         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1194         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1195         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1196         check_added_monitors!(nodes[1], 0);
1197
1198         updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1199         assert!(updates.update_add_htlcs.is_empty());
1200         assert!(updates.update_fail_htlcs.is_empty());
1201         assert!(updates.update_fail_malformed_htlcs.is_empty());
1202         assert!(updates.update_fee.is_none());
1203         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
1204         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1205         commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
1206         expect_payment_sent!(nodes[0], payment_preimage);
1207 }
1208
1209 #[test]
1210 fn raa_no_response_awaiting_raa_state() {
1211         // This is a rather convoluted test which ensures that if handling of an RAA does not happen
1212         // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
1213         // in question (assuming it intends to respond with a CS after monitor updating is restored).
1214         // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
1215         let chanmon_cfgs = create_chanmon_cfgs(2);
1216         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1217         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1218         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1219         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1220
1221         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1222         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
1223         let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);
1224
1225         // Queue up two payments - one will be delivered right away, one immediately goes into the
1226         // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
1227         // immediately after a CS. By failing the monitor update triggered by the CS (which, due to
1228         // AwaitingRAA, requires only an RAA in response) we can deliver the RAA and force the CS to be
1229         // generated during RAA handling while in the monitor-update-failed state.
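        // The resulting flow, with A = nodes[0] and B = nodes[1]:
        //
        //     A -> B: update_add_htlc + commitment_signed  (B's monitor update "fails")
        //     A -> B: revoke_and_ack                       (still failed; no response)
        //     ...B's monitor update completes...
        //     B -> A: revoke_and_ack + commitment_signed   (released together)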
1230         {
1231                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1232                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1233                 check_added_monitors!(nodes[0], 1);
1234                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1235                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1236                 check_added_monitors!(nodes[0], 0);
1237         }
1238
1239         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1240         assert_eq!(events.len(), 1);
1241         let payment_event = SendEvent::from_event(events.pop().unwrap());
1242         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1243         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1244         check_added_monitors!(nodes[1], 1);
1245
1246         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1247         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1248         check_added_monitors!(nodes[0], 1);
1249         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1250         assert_eq!(events.len(), 1);
1251         let payment_event = SendEvent::from_event(events.pop().unwrap());
1252
1253         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1254         check_added_monitors!(nodes[0], 1);
1255         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1256
1257         // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
1258         // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
1259         // then restore channel monitor updates.
1260         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1261         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1262         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1263         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1264         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1265         check_added_monitors!(nodes[1], 1);
1266         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1267
1268         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1269         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1270         check_added_monitors!(nodes[1], 1);
1271
1272         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1273         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1274         // nodes[1] should be AwaitingRAA here!
1275         check_added_monitors!(nodes[1], 0);
1276         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1277         expect_pending_htlcs_forwardable!(nodes[1]);
1278         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1279
1280         // We send a third payment here, which is somewhat redundant as a test, but the
1281         // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
1282         // commitment transaction states) whereas here we can explicitly check for it.
1283         {
1284                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
1285                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1286                 check_added_monitors!(nodes[0], 0);
1287                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1288         }
1289         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1290         check_added_monitors!(nodes[0], 1);
1291         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1292         assert_eq!(events.len(), 1);
1293         let payment_event = SendEvent::from_event(events.pop().unwrap());
1294
1295         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1296         check_added_monitors!(nodes[0], 1);
1297         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1298
1299         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1300         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1301         check_added_monitors!(nodes[1], 1);
1302         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1303
1304         // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
1305         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1306         check_added_monitors!(nodes[1], 1);
1307         expect_pending_htlcs_forwardable!(nodes[1]);
1308         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1309         let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1310
1311         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1312         check_added_monitors!(nodes[0], 1);
1313
1314         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
1315         check_added_monitors!(nodes[0], 1);
1316         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1317
1318         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1319         check_added_monitors!(nodes[1], 1);
1320         expect_pending_htlcs_forwardable!(nodes[1]);
1321         expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
1322
1323         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1324         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1325         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
1326 }
1327
1328 #[test]
1329 fn claim_while_disconnected_monitor_update_fail() {
1330         // Test for claiming a payment while disconnected and then having the resulting
1331         // channel-update-generated monitor update fail. This kind of thing isn't a particularly
1332         // contrived case for nodes with network instability.
1333         // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
1334         // code introduced a regression in this test (specifically, this caught a removal of the
1335         // channel_reestablish handling ensuring the order was sensible given the messages used).
1336         let chanmon_cfgs = create_chanmon_cfgs(2);
1337         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1338         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1339         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1340         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1341
1342         // Forward a payment for B to claim
1343         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1344
1345         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1346         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1347
1348         nodes[1].node.claim_funds(payment_preimage_1);
1349         check_added_monitors!(nodes[1], 1);
1350         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1351
1352         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1353                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1354         }, true).unwrap();
1355         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1356                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1357         }, false).unwrap();
1358
1359         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1360         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1361
1362         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1363         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1364
1365         // Now deliver A's reestablish, freeing the claim from the holding cell, but fail the monitor
1366         // update.
1367         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1368
1369         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1370         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1371         check_added_monitors!(nodes[1], 1);
1372         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1373
1374         // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
1375         // the monitor still failed
1376         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1377         {
1378                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1379                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1380                 check_added_monitors!(nodes[0], 1);
1381         }
1382
1383         let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1384         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
1385         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
1386         check_added_monitors!(nodes[1], 1);
1387         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1388         // Note that nodes[1] not updating the monitor here is OK - it won't take action on the new HTLC
1389         // until the monitor update completes and it has updated for the new commitment transaction.
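        // (Concretely: nodes[1] has processed the update_add and commitment_signed, but its
        // responding RAA/CS pair is withheld until the pending monitor write completes below.)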
1390
1391         // Now un-fail the monitor, which will result in B sending its original commitment update,
1392         // receiving the commitment update from A, and the resulting commitment dances.
1393         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1394         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1395         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1396         check_added_monitors!(nodes[1], 0);
1397
1398         let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
1399         assert_eq!(bs_msgs.len(), 2);
1400
1401         match bs_msgs[0] {
1402                 MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
1403                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1404                         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
1405                         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
1406                         check_added_monitors!(nodes[0], 1);
1407
1408                         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1409                         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1410                         check_added_monitors!(nodes[1], 1);
1411                 },
1412                 _ => panic!("Unexpected event"),
1413         }
1414
1415         match bs_msgs[1] {
1416                 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
1417                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1418                         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
1419                         check_added_monitors!(nodes[0], 1);
1420                 },
1421                 _ => panic!("Unexpected event"),
1422         }
1423
1424         let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
1425
1426         let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1427         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
1428         check_added_monitors!(nodes[0], 1);
1429         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1430
1431         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
1432         check_added_monitors!(nodes[1], 1);
1433         let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
1434         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1435         check_added_monitors!(nodes[1], 1);
1436
1437         expect_pending_htlcs_forwardable!(nodes[1]);
1438         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1439
1440         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
1441         check_added_monitors!(nodes[0], 1);
1442         expect_payment_sent!(nodes[0], payment_preimage_1);
1443
1444         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1445 }
1446
1447 #[test]
1448 fn monitor_failed_no_reestablish_response() {
1449         // Test for receiving a channel_reestablish after a monitor update failure resulted in no
1450         // response to a commitment_signed.
1451         // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
1452         // debug_assert!() failure in channel_reestablish handling.
1453         let chanmon_cfgs = create_chanmon_cfgs(2);
1454         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1455         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1456         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1457         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1458         {
1459                 let mut node_0_per_peer_lock;
1460                 let mut node_0_peer_state_lock;
1461                 get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1462         }
1463         {
1464                 let mut node_1_per_peer_lock;
1465                 let mut node_1_peer_state_lock;
1466                 get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
1467         }
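        // Setting AnnouncementSigsState::PeerReceived on both sides keeps the reconnect below
        // from wanting to (re)send announcement_signatures, which would otherwise mix unrelated
        // messages into the channel_reestablish flow under test.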
1468
1469         // Route the payment and deliver the initial commitment_signed (with a monitor update failure
1470         // on receipt).
1471         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1472         {
1473                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1474                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1475                 check_added_monitors!(nodes[0], 1);
1476         }
1477
1478         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1479         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1480         assert_eq!(events.len(), 1);
1481         let payment_event = SendEvent::from_event(events.pop().unwrap());
1482         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1483         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1484         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1485         check_added_monitors!(nodes[1], 1);
1486
1487         // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
1488         // is still failing to update monitors.
1489         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1490         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1491
1492         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
1493                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
1494         }, true).unwrap();
1495         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
1496                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
1497         }, false).unwrap();
1498
1499         let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
1500         let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
1501
1502         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
1503         let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
1504         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
1505         let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
1506
1507         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1508         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1509         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1510         check_added_monitors!(nodes[1], 0);
1511         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1512
1513         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1514         check_added_monitors!(nodes[0], 1);
1515         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1516         check_added_monitors!(nodes[0], 1);
1517
1518         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1519         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1520         check_added_monitors!(nodes[1], 1);
1521
1522         expect_pending_htlcs_forwardable!(nodes[1]);
1523         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1524
1525         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1526 }
1527
1528 #[test]
1529 fn first_message_on_recv_ordering() {
1530         // Test that if the initial generator of a monitor-update-frozen state doesn't generate
1531         // messages, we're willing to flip the order of response messages if necessary in response to
1532         // a commitment_signed which needs to send an RAA first.
1533         // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
1534         // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
1535         // response. To do this, we start routing two payments, with the final RAA for the first being
1536         // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
1537         // have no pending response but will want to send a RAA/CS (with the updates for the second
1538         // payment applied).
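        // Sketch of the delivery order exercised, with A = nodes[0] and B = nodes[1]:
        //
        //     A -> B: revoke_and_ack                       (monitor update fails; no response due)
        //     A -> B: update_add_htlc + commitment_signed  (response withheld)
        //     ...monitor update completes...
        //     B -> A: revoke_and_ack + commitment_signed   (RAA first, flipping the usual order)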
1539         // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
1540         let chanmon_cfgs = create_chanmon_cfgs(2);
1541         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1542         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1543         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1544         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1545
1546         // Route the first payment outbound, holding the last RAA for B until we are set up so that we
1547         // can deliver it and fail the monitor update.
1548         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1549         {
1550                 nodes[0].node.send_payment_with_route(&route, payment_hash_1,
1551                         RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
1552                 check_added_monitors!(nodes[0], 1);
1553         }
1554
1555         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1556         assert_eq!(events.len(), 1);
1557         let payment_event = SendEvent::from_event(events.pop().unwrap());
1558         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1559         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1560         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1561         check_added_monitors!(nodes[1], 1);
1562         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1563
1564         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1565         check_added_monitors!(nodes[0], 1);
1566         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1567         check_added_monitors!(nodes[0], 1);
1568
1569         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1570
1571         // Route the second payment, generating an update_add_htlc/commitment_signed
1572         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1573         {
1574                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1575                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1576                 check_added_monitors!(nodes[0], 1);
1577         }
1578         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1579         assert_eq!(events.len(), 1);
1580         let payment_event = SendEvent::from_event(events.pop().unwrap());
1581         assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
1582
1583         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1584
1585         // Deliver the final RAA for the first payment, which does not require a response. RAAs
1586         // generally require a commitment_signed in response, so the fact that we expect the opposite
1587         // response to the next message also tests resetting the delivery order.
1588         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1589         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1590         check_added_monitors!(nodes[1], 1);
1591
1592         // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
1593         // RAA/CS response, which should be generated when we call channel_monitor_update (with the
1594         // appropriate HTLC acceptance).
1595         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1596         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
1597         check_added_monitors!(nodes[1], 1);
1598         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1599
1600         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1601         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1602         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1603         check_added_monitors!(nodes[1], 0);
1604
1605         expect_pending_htlcs_forwardable!(nodes[1]);
1606         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
1607
1608         let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1609         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
1610         check_added_monitors!(nodes[0], 1);
1611         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
1612         check_added_monitors!(nodes[0], 1);
1613
1614         let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
1615         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1616         check_added_monitors!(nodes[1], 1);
1617
1618         expect_pending_htlcs_forwardable!(nodes[1]);
1619         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1620
1621         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
1622         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1623 }
1624
1625 #[test]
1626 fn test_monitor_update_fail_claim() {
1627         // Basic test for monitor update failures when processing claim_funds calls.
1628         // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
1629         // update to claim the payment. We then send two payments C->B->A, which are held at B.
1630         // Finally, we restore the channel monitor updating and claim the payment on B, forwarding
1631         // the payments from C onwards to A.
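        // Flow, with A = nodes[0], B = nodes[1], C = nodes[2] over A <-> B <-> C:
        //
        //     1) A pays B; B's claim_funds monitor update is left InProgress.
        //     2) Two C -> B -> A payments reach B and are held there.
        //     3) Completing the update releases B's claim, then both forwards to A.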
1632         let chanmon_cfgs = create_chanmon_cfgs(3);
1633         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1634         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1635         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1636         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1637         create_announced_chan_between_nodes(&nodes, 1, 2);
1638
1639         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
1640         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1641
1642         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1643
1644         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1645         nodes[1].node.claim_funds(payment_preimage_1);
1646         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1647         check_added_monitors!(nodes[1], 1);
1648
1649         // Note that at this point there is a pending commitment transaction update for A being held by
1650         // B. Even when we go to send the payment from C through B to A, B will not update this
1651         // already-signed commitment transaction and will instead wait for the monitor update to
1652         // resolve before forwarding the payment onwards.
1653
1654         let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
1655         {
1656                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1657                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1658                 check_added_monitors!(nodes[2], 1);
1659         }
1660
1661         // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
1662         // paused, so the forward shouldn't succeed until the monitor update is completed below.
1663         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1664
1665         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1666         assert_eq!(events.len(), 1);
1667         let payment_event = SendEvent::from_event(events.pop().unwrap());
1668         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1669         let events = nodes[1].node.get_and_clear_pending_msg_events();
1670         assert_eq!(events.len(), 0);
1671         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1672         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
1673
1674         let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
1675         nodes[2].node.send_payment_with_route(&route, payment_hash_3,
1676                 RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
1677         check_added_monitors!(nodes[2], 1);
1678
1679         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1680         assert_eq!(events.len(), 1);
1681         let payment_event = SendEvent::from_event(events.pop().unwrap());
1682         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1683         let events = nodes[1].node.get_and_clear_pending_msg_events();
1684         assert_eq!(events.len(), 0);
1685         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
1686
1687         // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
1688         let channel_id = chan_1.2;
1689         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1690         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1691         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1692         check_added_monitors!(nodes[1], 0);
1693
1694         let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1695         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
1696         commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
1697         expect_payment_sent!(nodes[0], payment_preimage_1);
1698
1699         // Process the pending payment forwards; note that both were batched into one commitment update.
1700         nodes[1].node.process_pending_htlc_forwards();
1701         check_added_monitors!(nodes[1], 1);
1702         let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1703         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
1704         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
1705         commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
1706         expect_pending_htlcs_forwardable!(nodes[0]);
1707
1708         let events = nodes[0].node.get_and_clear_pending_events();
1709         assert_eq!(events.len(), 2);
1710         match events[0] {
1711                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
1712                         assert_eq!(payment_hash_2, *payment_hash);
1713                         assert_eq!(1_000_000, amount_msat);
1714                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1715                         assert_eq!(via_channel_id, Some(channel_id));
1716                         assert_eq!(via_user_channel_id, Some(42));
1717                         match &purpose {
1718                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1719                                         assert!(payment_preimage.is_none());
1720                                         assert_eq!(payment_secret_2, *payment_secret);
1721                                 },
1722                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1723                         }
1724                 },
1725                 _ => panic!("Unexpected event"),
1726         }
1727         match events[1] {
1728                 Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
1729                         assert_eq!(payment_hash_3, *payment_hash);
1730                         assert_eq!(1_000_000, amount_msat);
1731                         assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
1732                         assert_eq!(via_channel_id, Some(channel_id));
1733                         match &purpose {
1734                                 PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
1735                                         assert!(payment_preimage.is_none());
1736                                         assert_eq!(payment_secret_3, *payment_secret);
1737                                 },
1738                                 _ => panic!("expected PaymentPurpose::InvoicePayment")
1739                         }
1740                 },
1741                 _ => panic!("Unexpected event"),
1742         }
1743 }
1744
1745 #[test]
1746 fn test_monitor_update_on_pending_forwards() {
1747         // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
1748         // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
1749         // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
1750         // from C to A will be pending a forward to A.
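        // Flow, with A = nodes[0], B = nodes[1], C = nodes[2]:
        //
        //     1) An A -> B -> C payment is failed back by C; the fail sits pending at B.
        //     2) A C -> B -> A payment sits pending a forward at B.
        //     3) B's monitor update fails while processing both; once it completes, a single
        //        commitment update to A carries the update_fail and the update_add together.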
1751         let chanmon_cfgs = create_chanmon_cfgs(3);
1752         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
1753         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
1754         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
1755         let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
1756         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
1757
1758         // Rebalance a bit so that we can send backwards from nodes[2] to nodes[0].
1759         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
1760
1761         let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
1762         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
1763         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
1764         check_added_monitors!(nodes[2], 1);
1765
1766         let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
1767         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
1768         commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
1769         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1770
1771         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
1772         {
1773                 nodes[2].node.send_payment_with_route(&route, payment_hash_2,
1774                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1775                 check_added_monitors!(nodes[2], 1);
1776         }
1777
1778         let mut events = nodes[2].node.get_and_clear_pending_msg_events();
1779         assert_eq!(events.len(), 1);
1780         let payment_event = SendEvent::from_event(events.pop().unwrap());
1781         nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
1782         commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
1783
1784         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1785         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1786         check_added_monitors!(nodes[1], 1);
1787
1788         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1789         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1790         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1791         check_added_monitors!(nodes[1], 0);
1792
1793         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1794         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1795         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1796         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1797
1798         let events = nodes[0].node.get_and_clear_pending_events();
1799         assert_eq!(events.len(), 3);
1800         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
1801                 assert_eq!(payment_hash, payment_hash_1);
1802                 assert!(payment_failed_permanently);
1803         } else { panic!("Unexpected event!"); }
1804         match events[2] {
1805                 Event::PaymentFailed { payment_hash, .. } => {
1806                         assert_eq!(payment_hash, payment_hash_1);
1807                 },
1808                 _ => panic!("Unexpected event"),
1809         }
1810         match events[0] {
1811                 Event::PendingHTLCsForwardable { .. } => { },
1812                 _ => panic!("Unexpected event"),
1813         };
1814         nodes[0].node.process_pending_htlc_forwards();
1815         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1816
1817         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1818 }
1819
1820 #[test]
1821 fn monitor_update_claim_fail_no_response() {
1822         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1823         // to channel being AwaitingRAA).
1824         // Backported from the chanmon_fail_consistency fuzz tests, where an unmerged version of the
1825         // handling code was found to be broken.
1826         let chanmon_cfgs = create_chanmon_cfgs(2);
1827         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1828         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1829         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1830         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1831
1832         // Forward a payment for B to claim
1833         let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1834
1835         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1836         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1837         {
1838                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1839                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1840                 check_added_monitors!(nodes[0], 1);
1841         }
1842
1843         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1844         assert_eq!(events.len(), 1);
1845         let payment_event = SendEvent::from_event(events.pop().unwrap());
1846         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1847         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
1848
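        // Claim while the channel is AwaitingRAA and the monitor update is paused. Even once the
        // update completes, no fulfill can be sent until nodes[0]'s revoke_and_ack is delivered.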
1849         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1850         nodes[1].node.claim_funds(payment_preimage_1);
1851         check_added_monitors!(nodes[1], 1);
1852
1853         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1854
1855         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1856         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1857         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1858         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1859         check_added_monitors!(nodes[1], 0);
1860         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1861
1862         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1863         check_added_monitors!(nodes[1], 1);
1864         expect_pending_htlcs_forwardable!(nodes[1]);
1865         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1866
1867         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1868         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1869         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1870         expect_payment_sent!(nodes[0], payment_preimage_1);
1871
1872         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1873 }
1874
1875 // restore_b_before_conf has no meaning if !confirm_a_first
1876 // restore_b_before_lock has no meaning if confirm_a_first
1877 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1878         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1879         // the channel setup happily after the update is restored.
1880         let chanmon_cfgs = create_chanmon_cfgs(2);
1881         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1882         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1883         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1884
1885         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1886         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1887         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1888
1889         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1890
1891         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1892         check_added_monitors!(nodes[0], 0);
1893
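        // Leave nodes[1]'s monitor persistence in-progress. Its funding_signed still goes out below,
        // but nodes[1] will withhold its channel_ready until the update is force-completed near the
        // end of the test.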
1894         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1895         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1896         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1897         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1898         check_added_monitors!(nodes[1], 1);
1899
1900         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1901         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1902         check_added_monitors!(nodes[0], 1);
1903         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1904         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1905         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1906         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1907         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1908         check_added_monitors!(nodes[0], 0);
1909         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1910
1911         let events = nodes[0].node.get_and_clear_pending_events();
1912         assert_eq!(events.len(), 0);
1913         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1914         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1915
1916         if confirm_a_first {
1917                 confirm_transaction(&nodes[0], &funding_tx);
1918                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1919                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1920                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1921         } else {
1922                 assert!(!restore_b_before_conf);
1923                 confirm_transaction(&nodes[1], &funding_tx);
1924                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1925         }
1926
1927         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1928         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1929         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1930         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
1931         reconnect_args.send_channel_ready.1 = confirm_a_first;
1932         reconnect_nodes(reconnect_args);
1933
1934         // But we want to re-emit ChannelPending
1935         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1936         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1937         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1938
1939         if !restore_b_before_conf {
1940                 confirm_transaction(&nodes[1], &funding_tx);
1941                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1942                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1943         }
1944         if !confirm_a_first && !restore_b_before_lock {
1945                 confirm_transaction(&nodes[0], &funding_tx);
1946                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1947                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1948                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1949         }
1950
1951         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1952         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1953         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1954         check_added_monitors!(nodes[1], 0);
1955
1956         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1957                 if !restore_b_before_lock {
1958                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1959                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1960                 } else {
1961                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1962                         confirm_transaction(&nodes[0], &funding_tx);
1963                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1964                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1965                 }
1966         } else {
1967                 if restore_b_before_conf {
1968                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1969                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1970                         confirm_transaction(&nodes[1], &funding_tx);
1971                 }
1972                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1973                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1974         };
1975         for node in nodes.iter() {
1976                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1977                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1978                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1979         }
1980
1981         if !restore_b_before_lock {
1982                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1983         } else {
1984                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1985         }
1986
1987
1988         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1989         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1990         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
1991         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
1992 }
1993
1994 #[test]
1995 fn during_funding_monitor_fail() {
1996         do_during_funding_monitor_fail(true, true, false);
1997         do_during_funding_monitor_fail(true, false, false);
1998         do_during_funding_monitor_fail(false, false, false);
1999         do_during_funding_monitor_fail(false, false, true);
2000 }
2001
2002 #[test]
2003 fn test_path_paused_mpp() {
2004         // Simple test of sending a multi-part payment where one path is currently blocked awaiting
2005         // a monitor update.
2006         let chanmon_cfgs = create_chanmon_cfgs(4);
2007         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2008         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2009         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2010
2011         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2012         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
2013         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
2014         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
2015
2016         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
2017
2018         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
2019         let path = route.paths[0].clone();
2020         route.paths.push(path);
2021         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
2022         route.paths[0].hops[0].short_channel_id = chan_1_id;
2023         route.paths[0].hops[1].short_channel_id = chan_3_id;
2024         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
2025         route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
2026         route.paths[1].hops[1].short_channel_id = chan_4_id;
2027
2028         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
2029         // (for the path 0 -> 2 -> 3) fails.
2030         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2031         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
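        // (set_update_ret queues return values on the TestPersister, so the first monitor update
        // pops Completed while the second pops InProgress.)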
2032
2033         // Now check that we get the right return value, indicating that the first path succeeded but
2034         // the second got a MonitorUpdateInProgress err. This implies
2035         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
2036         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
2037                 &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
2038         ) {
2039                 assert_eq!(results.len(), 2);
2040                 if let Ok(()) = results[0] {} else { panic!(); }
2041                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2042         } else { panic!(); }
2043         check_added_monitors!(nodes[0], 2);
2044         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2045
2046         // Pass the first HTLC of the payment along to nodes[3].
2047         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2048         assert_eq!(events.len(), 1);
2049         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2050
2051         // And check that, after we successfully update the monitor for chan_2 we can pass the second
2052         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2053         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2054         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2055         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2056         assert_eq!(events.len(), 1);
2057         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2058
2059         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
2060 }
2061
2062 #[test]
2063 fn test_pending_update_fee_ack_on_reconnect() {
2064         // In early versions of our automated fee update patch, nodes did not correctly use the
2065         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2066         // undelivered commitment_signed.
2067         //
2068         // B sends A new HTLC + CS, not delivered
2069         // A sends B update_fee + CS
2070         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2071         // reconnect
2072         // B resends initial CS, using the original fee
2073
2074         let chanmon_cfgs = create_chanmon_cfgs(2);
2075         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2076         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2077         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2078
2079         create_announced_chan_between_nodes(&nodes, 0, 1);
2080         send_payment(&nodes[0], &[&nodes[1]], 10_000_000);
2081
2082         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2083         nodes[1].node.send_payment_with_route(&route, payment_hash,
2084                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2085         check_added_monitors!(nodes[1], 1);
2086         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2087         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2088
2089         {
2090                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2091                 *feerate_lock *= 2;
2092         }
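        // Changing the estimator alone does nothing; the update_fee is only generated on the next
        // timer tick.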
2093         nodes[0].node.timer_tick_occurred();
2094         check_added_monitors!(nodes[0], 1);
2095         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2096         assert!(as_update_fee_msgs.update_fee.is_some());
2097
2098         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2099         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2100         check_added_monitors!(nodes[1], 1);
2101         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2102         // bs_first_raa is not delivered until it is re-generated after reconnect
2103
2104         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2105         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2106
2107         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2108                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2109         }, true).unwrap();
2110         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2111         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2112                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2113         }, false).unwrap();
2114         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2115
2116         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2117         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2118         assert_eq!(bs_resend_msgs.len(), 3);
2119         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2120                 assert_eq!(*updates, bs_initial_send_msgs);
2121         } else { panic!(); }
2122         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2123                 assert_eq!(*msg, bs_first_raa);
2124         } else { panic!(); }
2125         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
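        // The equality check above is the crux of the test: nodes[1] re-sends its original
        // commitment_signed, built at the old feerate, rather than one locked to the new fee.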
2126
2127         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2128         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2129
2130         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2131         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2132         check_added_monitors!(nodes[0], 1);
2133         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2134         check_added_monitors!(nodes[1], 1);
2135         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2136
2137         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2138         check_added_monitors!(nodes[0], 1);
2139         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2140         check_added_monitors!(nodes[1], 1);
2141         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2142
2143         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2144         check_added_monitors!(nodes[0], 1);
2145         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2146         check_added_monitors!(nodes[0], 1);
2147
2148         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2149         check_added_monitors!(nodes[1], 1);
2150
2151         expect_pending_htlcs_forwardable!(nodes[0]);
2152         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2153
2154         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2155 }
2156
2157 #[test]
2158 fn test_fail_htlc_on_broadcast_after_claim() {
2159         // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2160         // channel backwards if we received an HTLC failure after an HTLC fulfillment. Here we test a
2161         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2162         // HTLC was not included in a confirmed commitment transaction.
2163         //
2164         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2165         // channel immediately before commitment occurs. After the commitment transaction reaches
2166         // ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
2167         let chanmon_cfgs = create_chanmon_cfgs(3);
2168         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2169         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2170         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2171
2172         create_announced_chan_between_nodes(&nodes, 0, 1);
2173         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2174
2175         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2176
2177         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2178         assert_eq!(bs_txn.len(), 1);
2179
2180         nodes[2].node.claim_funds(payment_preimage);
2181         check_added_monitors!(nodes[2], 1);
2182         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2183
2184         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2185         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2186         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2187         check_added_monitors!(nodes[1], 1);
2188         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2189
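        // Mine nodes[2]'s commitment transaction. The 2000-msat HTLC was dust and thus absent from
        // it, so after ANTI_REORG_DELAY nodes[1]'s ChannelMonitor treats the HTLC as failed on-chain
        // even though it was already fulfilled off-chain above.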
2190         mine_transaction(&nodes[1], &bs_txn[0]);
2191         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
2192         check_closed_broadcast!(nodes[1], true);
2193         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2194         check_added_monitors!(nodes[1], 1);
2195         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2196
2197         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2198         expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2199         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2200         expect_payment_path_successful!(nodes[0]);
2201 }
2202
2203 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2204         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2205         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2206         // explicitly here.
2207         let chanmon_cfgs = create_chanmon_cfgs(2);
2208         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2209         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2210         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2211
2212         create_announced_chan_between_nodes(&nodes, 0, 1);
2213         send_payment(&nodes[0], &[&nodes[1]], 1000);
2214
2215         {
2216                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2217                 *feerate_lock += 20;
2218         }
2219         nodes[0].node.timer_tick_occurred();
2220         check_added_monitors!(nodes[0], 1);
2221         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2222         assert!(update_msgs.update_fee.is_some());
2223         if deliver_update {
2224                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2225         }
2226
2227         if parallel_updates {
2228                 {
2229                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2230                         *feerate_lock += 20;
2231                 }
2232                 nodes[0].node.timer_tick_occurred();
2233                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2234         }
2235
2236         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2237         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2238
2239         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2240                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2241         }, true).unwrap();
2242         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2243         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2244                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2245         }, false).unwrap();
2246         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2247
2248         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2249         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2250         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2251
2252         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2253         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2254         assert_eq!(as_reconnect_msgs.len(), 2);
2255         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2256         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2257                 { updates } else { panic!(); };
2258         assert!(update_msgs.update_fee.is_some());
2259         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2260         if parallel_updates {
2261                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2262                 check_added_monitors!(nodes[1], 1);
2263                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2264                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2265                 check_added_monitors!(nodes[0], 1);
2266                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2267
2268                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2269                 check_added_monitors!(nodes[0], 1);
2270                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2271
2272                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2273                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2274                 check_added_monitors!(nodes[1], 1);
2275                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2276
2277                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2278                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2279                 check_added_monitors!(nodes[1], 1);
2280
2281                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2282                 check_added_monitors!(nodes[0], 1);
2283
2284                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2285                 check_added_monitors!(nodes[0], 1);
2286                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2287
2288                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2289                 check_added_monitors!(nodes[1], 1);
2290         } else {
2291                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2292         }
2293
2294         send_payment(&nodes[0], &[&nodes[1]], 1000);
2295 }
2296 #[test]
2297 fn update_fee_resend_test() {
2298         do_update_fee_resend_test(false, false);
2299         do_update_fee_resend_test(true, false);
2300         do_update_fee_resend_test(false, true);
2301         do_update_fee_resend_test(true, true);
2302 }
2303
2304 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2305         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2306         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2307         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2308         // which failed in such a case).
2309         let chanmon_cfgs = create_chanmon_cfgs(2);
2310         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2311         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2312         let persister: test_utils::TestPersister;
2313         let new_chain_monitor: test_utils::TestChainMonitor;
2314         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
2315         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2316
2317         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2318         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2319         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2320
2321         // Do a really complicated dance to get an HTLC into the holding cell, with
2322         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2323         // attempts to send an HTLC while MonitorUpdateInProgress is set were immediately
2324         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2325         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2326         // flags.
2327         //
2328         // We do this by:
2329         //  a) routing a payment from node B to node A,
2330         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2331         //     putting node A in AwaitingRemoteRevoke,
2332         //  c) sending a second payment from node A to node B, which is immediately placed in the
2333         //     holding cell,
2334         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2335         //     when we try to persist the payment preimage,
2336         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2337         //     clearing AwaitingRemoteRevoke on node A.
2338         //
2339         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2340         // (c) will not be freed from the holding cell.
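        // Step (a): route a payment from nodes[1] to nodes[0], which nodes[0] will claim in step (d).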
2341         let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2342
2343         nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2344                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2345         check_added_monitors!(nodes[0], 1);
2346         let send = SendEvent::from_node(&nodes[0]);
2347         assert_eq!(send.msgs.len(), 1);
2348
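        // Step (c): this HTLC goes straight into the holding cell, as nodes[0] is now
        // AwaitingRemoteRevoke, hence no new commitment or monitor update.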
2349         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2350                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2351         check_added_monitors!(nodes[0], 0);
2352
2353         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
2354         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2355         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
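        // Two InProgress returns are queued: one for the claim's monitor update (step (d)) and one
        // for the update generated when nodes[1]'s revoke_and_ack is handled below (step (e)).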
2356         nodes[0].node.claim_funds(payment_preimage_0);
2357         check_added_monitors!(nodes[0], 1);
2358
2359         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2360         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2361         check_added_monitors!(nodes[1], 1);
2362
2363         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2364
2365         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2366         check_added_monitors!(nodes[0], 1);
2367
2368         if disconnect {
2369                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2370                 // disconnect the peers. Note that the fuzzer originally found this issue because
2371                 // deserializing a ChannelManager in this state causes an assertion failure.
2372                 if reload_a {
2373                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2374                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2375                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2376                 } else {
2377                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2378                 }
2379                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2380
2381                 // Now reconnect the two
2382                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2383                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2384                 }, true).unwrap();
2385                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2386                 assert_eq!(reestablish_1.len(), 1);
2387                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2388                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2389                 }, false).unwrap();
2390                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2391                 assert_eq!(reestablish_2.len(), 1);
2392
2393                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2394                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2395                 check_added_monitors!(nodes[1], 0);
2396
2397                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2398                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2399
2400                 assert!(resp_0.0.is_none());
2401                 assert!(resp_0.1.is_none());
2402                 assert!(resp_0.2.is_none());
2403                 assert!(resp_1.0.is_none());
2404                 assert!(resp_1.1.is_none());
2405
2406                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2407                 // moment).
2408                 if let Some(pending_cs) = resp_1.2 {
2409                         assert!(pending_cs.update_add_htlcs.is_empty());
2410                         assert!(pending_cs.update_fail_htlcs.is_empty());
2411                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2412                         assert_eq!(pending_cs.commitment_signed, cs);
2413                 } else { panic!(); }
2414
2415                 if reload_a {
2416                         // The two pending monitor updates were replayed (but are still pending).
2417                         check_added_monitors(&nodes[0], 2);
2418                 } else {
2419                         // There should be no monitor updates as we are still awaiting the completion of the failed (in-progress) one.
2420                         check_added_monitors(&nodes[0], 0);
2421                 }
2422                 check_added_monitors(&nodes[1], 0);
2423         }
2424
2425         // If we finish updating the monitor, we should free the holding cell right away (this did
2426         // not occur prior to #756).
2427         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2428         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2429         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2430         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2431
2432         // New outbound messages should be generated immediately upon a call to
2433         // get_and_clear_pending_msg_events (but not before).
2434         check_added_monitors!(nodes[0], 0);
2435         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2436         check_added_monitors!(nodes[0], 1);
2437         assert_eq!(events.len(), 1);
2438
2439         // Deliver the pending in-flight CS
2440         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2441         check_added_monitors!(nodes[0], 1);
2442
2443         let commitment_msg = match events.pop().unwrap() {
2444                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2445                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2446                         assert!(updates.update_fail_htlcs.is_empty());
2447                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2448                         assert!(updates.update_fee.is_none());
2449                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2450                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2451                         expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
2452                         assert_eq!(updates.update_add_htlcs.len(), 1);
2453                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2454                         updates.commitment_signed
2455                 },
2456                 _ => panic!("Unexpected event type!"),
2457         };
2458
2459         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2460         check_added_monitors!(nodes[1], 1);
2461
2462         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2463         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2464         expect_pending_htlcs_forwardable!(nodes[1]);
2465         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2466         check_added_monitors!(nodes[1], 1);
2467
2468         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
2469
2470         let events = nodes[1].node.get_and_clear_pending_events();
2471         assert_eq!(events.len(), 2);
2472         match events[0] {
2473                 Event::PendingHTLCsForwardable { .. } => { },
2474                 _ => panic!("Unexpected event"),
2475         };
2476         match events[1] {
2477                 Event::PaymentPathSuccessful { .. } => { },
2478                 _ => panic!("Unexpected event"),
2479         };
2480
2481         nodes[1].node.process_pending_htlc_forwards();
2482         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2483
2484         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2485         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2486 }
2487 #[test]
2488 fn channel_holding_cell_serialize() {
2489         do_channel_holding_cell_serialize(true, true);
2490         do_channel_holding_cell_serialize(true, false);
2491         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2492 }
2493
2494 #[derive(PartialEq)]
2495 enum HTLCStatusAtDupClaim {
2496         Received,
2497         HoldingCell,
2498         Cleared,
2499 }
2500 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2501         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2502         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2503         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2504         // reconnects, and then has to re-send its update_fulfill_htlc message.
2505         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2506         // channel on which the inbound HTLC was received.
2507         let chanmon_cfgs = create_chanmon_cfgs(3);
2508         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2509         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2510         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2511
2512         create_announced_chan_between_nodes(&nodes, 0, 1);
2513         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2514
2515         let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2516
2517         let mut as_raa = None;
2518         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2519                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2520                 // awaiting a remote revoke_and_ack from nodes[0].
2521                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2522                 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2523                         RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2524                 check_added_monitors!(nodes[0], 1);
2525
2526                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2527                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2528                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2529                 check_added_monitors!(nodes[1], 1);
2530
2531                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2532                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2533                 check_added_monitors!(nodes[0], 1);
2534                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2535                 check_added_monitors!(nodes[0], 1);
2536
2537                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2538         }
2539
2540         let fulfill_msg = msgs::UpdateFulfillHTLC {
2541                 channel_id: chan_id_2,
2542                 htlc_id: 0,
2543                 payment_preimage,
2544         };
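        // The fulfill is hand-built so the same message can be delivered to nodes[1] in both
        // variants; when `second_fails` is set this simulates nodes[2] having sent the fulfill
        // before deciding to fail the HTLC.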
2545         if second_fails {
2546                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2547                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2548                 check_added_monitors!(nodes[2], 1);
2549                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2550         } else {
2551                 nodes[2].node.claim_funds(payment_preimage);
2552                 check_added_monitors!(nodes[2], 1);
2553                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2554
2555                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2556                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2557                 // Check that the message we're about to deliver matches the one generated:
2558                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2559         }
2560         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2561         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2562         check_added_monitors!(nodes[1], 1);
2563
2564         let mut bs_updates = None;
2565         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2566                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2567                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2568                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2569                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2570                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2571                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2572                         expect_payment_path_successful!(nodes[0]);
2573                 }
2574         } else {
2575                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2576         }
2577
2578         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2579         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2580
2581         if second_fails {
2582                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2583                 reconnect_args.pending_htlc_fails.0 = 1;
2584                 reconnect_nodes(reconnect_args);
2585                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2586         } else {
2587                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2588                 reconnect_args.pending_htlc_claims.0 = 1;
2589                 reconnect_nodes(reconnect_args);
2590         }
2591
2592         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2593                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2594                 check_added_monitors!(nodes[1], 1);
2595                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2596
2597                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2598                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2599                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2600                 expect_payment_sent_without_paths!(nodes[0], payment_preimage);
2601         }
2602         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2603                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2604                 expect_payment_path_successful!(nodes[0]);
2605         }
2606 }
2607
2608 #[test]
2609 fn test_reconnect_dup_htlc_claims() {
2610         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2611         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2612         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2613         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2614         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2615         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2616 }
2617
2618 #[test]
2619 fn test_temporary_error_during_shutdown() {
2620         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2621         // close.
2622         let mut config = test_default_channel_config();
2623         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
2624
2625         let chanmon_cfgs = create_chanmon_cfgs(2);
2626         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2627         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2628         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2629
2630         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2631
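        // Hold both sides' shutdown-script monitor updates in-progress; `shutdown` still flows, but
        // `closing_signed` is withheld until each update completes.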
2632         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2633         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2634
2635         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2636         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2637         check_added_monitors!(nodes[1], 1);
2638
2639         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2640         check_added_monitors!(nodes[0], 1);
2641
2642         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2643
2644         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2645         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2646
2647         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2648         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2649         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2650
2651         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2652
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);

	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(none_b.is_none());
	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	assert_eq!(txn_a, txn_b);
	assert_eq!(txn_a.len(), 1);
	check_spends!(txn_a[0], funding_tx);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
}

#[test]
fn test_permanent_error_during_sending_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when initiating a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());

	// We always send the `shutdown` message when initiating a shutdown, even if we immediately
	// close the channel thereafter.
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

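	// Two monitor updates are expected: the shutdown-script update that returned
	// PermanentFailure, plus the update applied when force-closing the channel.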
	check_added_monitors!(nodes[0], 2);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn test_permanent_error_during_handling_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when handling a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);

	// We always send the `shutdown` response when receiving a shutdown, even if we immediately
	// close the channel thereafter.
	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

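	// As on the sending side: the failed update plus the force-close update account for the two
	// monitor updates.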
	check_added_monitors!(nodes[1], 2);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
}

#[test]
fn double_temp_error() {
	// Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// `claim_funds` results in a ChannelMonitorUpdate.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
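	// `latest_update_1` identifies the in-flight update; we'll mark it completed further down.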

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
	// which had some asserts that prevented it from being called twice.
	nodes[1].node.claim_funds(payment_preimage_2);
	check_added_monitors!(nodes[1], 1);
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

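	// Completing only the first update must release nothing: no messages and no new monitor
	// updates until the second update also completes.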
	let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);

	// Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
	// and get both PaymentClaimed events at once.
	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
		_ => panic!("Unexpected Event: {:?}", events[0]),
	}
	match events[1] {
		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
		_ => panic!("Unexpected Event: {:?}", events[1]),
	}

	assert_eq!(msg_events.len(), 1);
	let (update_fulfill_1, commitment_signed_b1, node_id) = {
		match &msg_events[0] {
			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				(update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};
	assert_eq!(node_id, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
	check_added_monitors!(nodes[0], 0);
	expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.process_pending_htlc_forwards();
	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
	check_added_monitors!(nodes[1], 1);

	// Complete the second HTLC.
	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		(match &events[0] {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		},
		match events[1] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				(*msg).clone()
			},
			_ => panic!("Unexpected event"),
		})
	};
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
	expect_payment_sent!(nodes[0], payment_preimage_2);
}

fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
	// Test that if the monitor update generated in funding_signed is persisted asynchronously and
	// we restart with the latest ChannelManager, but the ChannelMonitor persistence never
	// completed, we happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

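	// Manual channel acceptance lets the test drive either the 0conf or the standard accept path
	// below.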
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
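	// With 0conf, nodes[1] sends channel_ready immediately after funding_signed; otherwise
	// funding_signed is the only message expected here.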
	match &bs_signed_locked[0] {
		MessageSendEvent::SendFundingSigned { msg, .. } => {
			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

			nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		}
		_ => panic!("Unexpected event"),
	}
	if use_0conf {
		match &bs_signed_locked[1] {
			MessageSendEvent::SendChannelReady { msg, .. } => {
				nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
			}
			_ => panic!("Unexpected event"),
		}
	}

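	// nodes[0]'s initial monitor persistence is still pending, so it must neither broadcast the
	// funding transaction nor emit any messages or events.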
	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

	// nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
	// broadcast the funding transaction. If nodes[0] restarts at this point with the
	// ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[0].chain_source.watched_txn.lock().unwrap().clear();
	nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
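	// With the initial ChannelMonitor never persisted, the reloaded node discards the channel,
	// which surfaces as a DisconnectedPeer closure.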
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_outbound_reload_without_init_mon() {
	do_test_outbound_reload_without_init_mon(true);
	do_test_outbound_reload_without_init_mon(false);
}

fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
	// Test that if the monitor update generated when handling funding_created is persisted
	// asynchronously and we restart with the latest ChannelManager, but the ChannelMonitor
	// persistence never completed, we happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
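	// Defer the inbound side's initial monitor persistence before it handles funding_created.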
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);

	// nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the
	// initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
	// transaction is confirmed.
	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors!(nodes[0], 1);
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

	let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	if lock_commitment {
		confirm_transaction(&nodes[0], &as_funding_tx[0]);
		confirm_transaction(&nodes[1], &as_funding_tx[0]);
	}
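	// nodes[0] sends channel_ready once the channel is 0conf or the funding transaction has
	// confirmed; nodes[1] must still withhold its own channel_ready (asserted below).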
	if use_0conf || lock_commitment {
		let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
	}
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
	// move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
	// restarts at this point with the ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[1].chain_source.watched_txn.lock().unwrap().clear();
	nodes[1].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_inbound_reload_without_init_mon() {
	do_test_inbound_reload_without_init_mon(true, true);
	do_test_inbound_reload_without_init_mon(true, false);
	do_test_inbound_reload_without_init_mon(false, true);
	do_test_inbound_reload_without_init_mon(false, false);
}