Add PaymentId in ChannelManager.list_recent_payments()
[rust-lightning] / lightning / src / ln / chanmon_update_fail_tests.rs
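This file is shown as part of a change that adds the payment's PaymentId to the entries returned by ChannelManager::list_recent_payments(). A minimal caller-side sketch (an illustration, not code from this file: it assumes each RecentPaymentDetails variant now carries a payment_id field, which is the point of the change, and `channel_manager` is a hypothetical initialized ChannelManager):

        // Sketch: read back the PaymentId that was supplied at send time.
        use lightning::ln::channelmanager::RecentPaymentDetails;
        for details in channel_manager.list_recent_payments() {
                match details {
                        RecentPaymentDetails::Pending { payment_id, payment_hash, .. } =>
                                println!("pending: {:?} (hash {:?})", payment_id, payment_hash),
                        RecentPaymentDetails::Fulfilled { payment_id, .. } =>
                                println!("fulfilled: {:?}", payment_id),
                        RecentPaymentDetails::Abandoned { payment_id, .. } =>
                                println!("abandoned: {:?}", payment_id),
                        // Any other variants elided for brevity.
                        _ => {},
                }
        }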
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Functional tests which test the correct handling of ChannelMonitorUpdateStatus returns from
//! monitor updates.
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.

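// Nearly every test below follows the same skeleton; a condensed sketch (using the
// TestPersister/TestChainMonitor helpers exactly as the tests themselves do):
//
//     // 1) Queue the status the next ChannelMonitor persistence call should return:
//     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
//     // 2) Trigger a channel update (send/claim/fail a payment) and observe that
//     //    message processing halts with no msg_events generated.
//     // 3) Mark persistence as working again and complete the stuck update:
//     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
//     let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id
//         .lock().unwrap().get(&channel_id).unwrap().clone();
//     nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
//     // 4) Drive the now-released messages through and assert normal operation resumes.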
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::errors::APIError;
use crate::util::ser::{ReadableArgs, Writeable};
use crate::util::test_utils::TestBroadcaster;

use crate::ln::functional_test_utils::*;

use crate::util::test_utils;

use crate::io;
use bitcoin::hashes::Hash;
use crate::prelude::*;
use crate::sync::{Arc, Mutex};

#[test]
fn test_simple_monitor_permanent_update_fail() {
        // Test that we handle a simple permanent monitor update failure
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        create_announced_chan_between_nodes(&nodes, 0, 1);

        let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
                        RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
                ), true, APIError::ChannelUnavailable {..}, {});
        check_added_monitors!(nodes[0], 2);

        let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_1.len(), 2);
        match events_1[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        };
        match events_1[1] {
                MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
                _ => panic!("Unexpected event"),
        };

        assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());

        // TODO: Once we hit the chain with the failure transaction we should check that we get a
        // PaymentPathFailed event

        assert_eq!(nodes[0].node.list_channels().len(), 0);
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
                [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_monitor_and_persister_update_fail() {
        // Test that if both updating the `ChannelMonitor` and persisting the updated
        // `ChannelMonitor` fail, then the failure from updating the `ChannelMonitor`
        // is the one that gets returned.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

        // Create some initial channel
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
        let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };

        // Rebalance the network to generate HTLCs in the two directions
        send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);

        // Route an HTLC from node 0 to node 1 (but don't settle)
        let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);

        // Make a copy of the ChainMonitor so we can capture the error it returns on a
        // bogus update. Note that if instead we updated nodes[0]'s ChainMonitor
        // directly, the node would fail to be `Drop`'d at the end because its
        // ChannelManager and ChainMonitor would be out of sync.
        let chain_source = test_utils::TestChainSource::new(Network::Testnet);
        let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
        let persister = test_utils::TestPersister::new();
        let tx_broadcaster = TestBroadcaster {
                txn_broadcasted: Mutex::new(Vec::new()),
                // Because we will connect a block at height 200 below, we need the TestBroadcaster to know
                // that we are at height 200 so that it doesn't think we're violating the time lock
                // requirements of transactions broadcasted at that point.
                blocks: Arc::new(Mutex::new(vec![(genesis_block(Network::Testnet), 200); 200])),
        };
        let chain_mon = {
                let new_monitor = {
                        let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap();
                        let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
                                &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1;
                        assert!(new_monitor == *monitor);
                        new_monitor
                };
                let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
                chain_mon
        };
        chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200);

        // Set the persister's return value to be an InProgress.
        persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

        // Try to update ChannelMonitor
        nodes[1].node.claim_funds(preimage);
        expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
        check_added_monitors!(nodes[1], 1);

        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
        {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
                if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
                        if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
                                // Check that even though the persister is returning an InProgress,
                                // because the update is bogus, ultimately the error that's returned
                                // should be a PermanentFailure.
                                if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
                                logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
                                assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
                        } else { assert!(false); }
                } else {
                        assert!(false);
                }
        }

        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
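        // (Presumably the PaymentSent event for the payment just claimed; this version of
        // the test only asserts the event count.)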
}

fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
        // Test that we can recover from a simple temporary monitor update failure, optionally with
        // a disconnect in between
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);

        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

        {
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_1,
                                RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
                        ), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }

        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert_eq!(nodes[0].node.list_channels().len(), 1);

        if disconnect {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
                let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
                reconnect_args.send_channel_ready = (true, true);
                reconnect_nodes(reconnect_args);
        }

        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);

        let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        let payment_event = SendEvent::from_event(events_2.pop().unwrap());
        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);

        expect_pending_htlcs_forwardable!(nodes[1]);

        let events_3 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_3.len(), 1);
        match events_3[0] {
                Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash_1, *payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_1, *payment_secret);
                                },
                                _ => panic!("expected PaymentPurpose::InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
        }

        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);

        // Now set it to failed again...
        let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000);
        {
                chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
                                RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
                        ), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }

        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert_eq!(nodes[0].node.list_channels().len(), 1);

        if disconnect {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
                reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
        }

        // ...and make sure we can force-close a frozen channel
        nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], true);

        // TODO: Once we hit the chain with the failure transaction we should check that we get a
        // PaymentPathFailed event

        assert_eq!(nodes[0].node.list_channels().len(), 0);
        check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_simple_monitor_temporary_update_fail() {
        do_test_simple_monitor_temporary_update_fail(false);
        do_test_simple_monitor_temporary_update_fail(true);
}

fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
        let disconnect_flags = 8 | 16;

        // Test that we can recover from a temporary monitor update failure with some in-flight
        // HTLCs going on at the same time, potentially with some disconnection thrown in.
        // * First we route a payment, then get a temporary monitor update failure when trying to
        //   route a second payment. We then claim the first payment.
        // * If disconnect_count is set, we will disconnect at this point (which is likely as
        //   InProgress likely indicates a network disconnect which resulted in failing to update
        //   the ChannelMonitor on a watchtower).
        // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
        //   immediately, otherwise we wait for the disconnect and deliver them via the reconnect
        //   channel_reestablish processing (i.e. disconnect_count & 16 makes no sense if
        //   disconnect_count & !disconnect_flags is 0).
        // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
        //   through message sending, potentially disconnect/reconnecting multiple times based on
        //   disconnect_count, to get the update_fulfill_htlc through.
        // * We then walk through more message exchanges to get the original update_add_htlc
        //   through, swapping message ordering based on disconnect_count & 8 and optionally
        //   disconnect/reconnecting based on disconnect_count.
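        // For example (an illustrative decoding, not part of the original comment):
        // disconnect_count = 3 | 8 | 16 disconnects at the first three reconnect checkpoints
        // (low bits = 3), delivers nodes[0]'s initial RAA before handling nodes[1]'s RAA
        // (bit 8), and defers the initial update_fulfill_htlc/CS to channel_reestablish
        // processing (bit 16).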
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

        // Now try to send a second payment which will fail to send
        let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
                chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash_2,
                                RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)
                        ), false, APIError::MonitorUpdateInProgress, {});
                check_added_monitors!(nodes[0], 1);
        }

        assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert_eq!(nodes[0].node.list_channels().len(), 1);

        // Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1]
        // but nodes[0] won't respond since it is frozen.
        nodes[1].node.claim_funds(payment_preimage_1);
        check_added_monitors!(nodes[1], 1);
        expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

        let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(events_2.len(), 1);
        let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                        assert!(update_add_htlcs.is_empty());
                        assert_eq!(update_fulfill_htlcs.len(), 1);
                        assert!(update_fail_htlcs.is_empty());
                        assert!(update_fail_malformed_htlcs.is_empty());
                        assert!(update_fee.is_none());

                        if (disconnect_count & 16) == 0 {
                                nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]);
                                let events_3 = nodes[0].node.get_and_clear_pending_events();
                                assert_eq!(events_3.len(), 1);
                                match events_3[0] {
                                        Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
                                                assert_eq!(*payment_preimage, payment_preimage_1);
                                                assert_eq!(*payment_hash, payment_hash_1);
                                        },
                                        _ => panic!("Unexpected event"),
                                }

                                nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
                                check_added_monitors!(nodes[0], 1);
                                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        }

                        (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
                },
                _ => panic!("Unexpected event"),
        };

        if disconnect_count & !disconnect_flags > 0 {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
        }

        // Now fix monitor updating...
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);

        macro_rules! disconnect_reconnect_peers { () => { {
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

                nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
                        features: nodes[1].node.init_features(), networks: None, remote_network_address: None
                }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
                        features: nodes[0].node.init_features(), networks: None, remote_network_address: None
                }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);

                nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
                let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
                nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
                let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

                assert!(as_resp.0.is_none());
                assert!(bs_resp.0.is_none());

                (reestablish_1, reestablish_2, as_resp, bs_resp)
        } } }

        let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
                assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

                nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
                        features: nodes[1].node.init_features(), networks: None, remote_network_address: None
                }, true).unwrap();
                let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
                assert_eq!(reestablish_1.len(), 1);
                nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
                        features: nodes[0].node.init_features(), networks: None, remote_network_address: None
                }, false).unwrap();
                let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
                assert_eq!(reestablish_2.len(), 1);

                nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
                check_added_monitors!(nodes[0], 0);
                let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
                nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
                check_added_monitors!(nodes[1], 0);
                let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);

                assert!(as_resp.0.is_none());
                assert!(bs_resp.0.is_none());

                assert!(bs_resp.1.is_none());
                if (disconnect_count & 16) == 0 {
                        assert!(bs_resp.2.is_none());

                        assert!(as_resp.1.is_some());
                        assert!(as_resp.2.is_some());
                        assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
                } else {
                        assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
                        assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
                        assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
                        assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
                        assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
                        assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);

                        assert!(as_resp.1.is_none());

                        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]);
                        let events_3 = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events_3.len(), 1);
                        match events_3[0] {
                                Event::PaymentSent { ref payment_preimage, ref payment_hash, .. } => {
                                        assert_eq!(*payment_preimage, payment_preimage_1);
                                        assert_eq!(*payment_hash, payment_hash_1);
                                },
                                _ => panic!("Unexpected event"),
                        }

                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed);
                        let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
                        // No commitment_signed so get_event_msg's assert(len == 1) passes
                        check_added_monitors!(nodes[0], 1);

                        as_resp.1 = Some(as_resp_raa);
                        bs_resp.2 = None;
                }

                if disconnect_count & !disconnect_flags > 1 {
                        let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();

                        if (disconnect_count & 16) == 0 {
                                assert!(reestablish_1 == second_reestablish_1);
                                assert!(reestablish_2 == second_reestablish_2);
                        }
                        assert!(as_resp == second_as_resp);
                        assert!(bs_resp == second_bs_resp);
                }

                (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
        } else {
                let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events_4.len(), 2);
                (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
                        MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                })
        };

        assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
        let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[1], 1);

        if disconnect_count & !disconnect_flags > 2 {
                let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

                assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
                assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

                assert!(as_resp.2.is_none());
                assert!(bs_resp.2.is_none());
        }

        let as_commitment_update;
        let bs_second_commitment_update;

        macro_rules! handle_bs_raa { () => {
                nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
                as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
                assert!(as_commitment_update.update_add_htlcs.is_empty());
                assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
                assert!(as_commitment_update.update_fail_htlcs.is_empty());
                assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
                assert!(as_commitment_update.update_fee.is_none());
                check_added_monitors!(nodes[0], 1);
        } }

        macro_rules! handle_initial_raa { () => {
                nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack);
                bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
                assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
                assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
                assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
                assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
                assert!(bs_second_commitment_update.update_fee.is_none());
                check_added_monitors!(nodes[1], 1);
        } }

        if (disconnect_count & 8) == 0 {
                handle_bs_raa!();

                if disconnect_count & !disconnect_flags > 3 {
                        let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

                        assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
                        assert!(bs_resp.1.is_none());

                        assert!(as_resp.2.unwrap() == as_commitment_update);
                        assert!(bs_resp.2.is_none());

                        assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
                }

                handle_initial_raa!();

                if disconnect_count & !disconnect_flags > 4 {
                        let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

                        assert!(as_resp.1.is_none());
                        assert!(bs_resp.1.is_none());

                        assert!(as_resp.2.unwrap() == as_commitment_update);
                        assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
                }
        } else {
                handle_initial_raa!();

                if disconnect_count & !disconnect_flags > 3 {
                        let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

                        assert!(as_resp.1.is_none());
                        assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);

                        assert!(as_resp.2.is_none());
                        assert!(bs_resp.2.unwrap() == bs_second_commitment_update);

                        assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
                }

                handle_bs_raa!();

                if disconnect_count & !disconnect_flags > 4 {
                        let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();

                        assert!(as_resp.1.is_none());
                        assert!(bs_resp.1.is_none());

                        assert!(as_resp.2.unwrap() == as_commitment_update);
                        assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
                }
        }

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed);
        let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[0], 1);

        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed);
        let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
        // No commitment_signed so get_event_msg's assert(len == 1) passes
        check_added_monitors!(nodes[1], 1);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);
        expect_payment_path_successful!(nodes[0]);

        expect_pending_htlcs_forwardable!(nodes[1]);

        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
                Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash_2, *payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(payment_secret_2, *payment_secret);
                                },
                                _ => panic!("expected PaymentPurpose::InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
        }

        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_temporary_update_fail_a() {
        do_test_monitor_temporary_update_fail(0);
        do_test_monitor_temporary_update_fail(1);
        do_test_monitor_temporary_update_fail(2);
        do_test_monitor_temporary_update_fail(3);
        do_test_monitor_temporary_update_fail(4);
        do_test_monitor_temporary_update_fail(5);
}

#[test]
fn test_monitor_temporary_update_fail_b() {
        do_test_monitor_temporary_update_fail(2 | 8);
        do_test_monitor_temporary_update_fail(3 | 8);
        do_test_monitor_temporary_update_fail(4 | 8);
        do_test_monitor_temporary_update_fail(5 | 8);
}

#[test]
fn test_monitor_temporary_update_fail_c() {
        do_test_monitor_temporary_update_fail(1 | 16);
        do_test_monitor_temporary_update_fail(2 | 16);
        do_test_monitor_temporary_update_fail(3 | 16);
        do_test_monitor_temporary_update_fail(2 | 8 | 16);
        do_test_monitor_temporary_update_fail(3 | 8 | 16);
}

#[test]
fn test_monitor_update_fail_cs() {
        // Tests handling of a monitor update failure when processing an incoming commitment_signed
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
                nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
                check_added_monitors!(nodes[0], 1);
        }

        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 1);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[1], 0);
        let responses = nodes[1].node.get_and_clear_pending_msg_events();
        assert_eq!(responses.len(), 2);

        match responses[0] {
                MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());
                        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg);
                        check_added_monitors!(nodes[0], 1);
                },
                _ => panic!("Unexpected event"),
        }
        match responses[1] {
                MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
                        assert!(updates.update_add_htlcs.is_empty());
                        assert!(updates.update_fulfill_htlcs.is_empty());
                        assert!(updates.update_fail_htlcs.is_empty());
                        assert!(updates.update_fail_malformed_htlcs.is_empty());
                        assert!(updates.update_fee.is_none());
                        assert_eq!(*node_id, nodes[0].node.get_our_node_id());

                        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
                        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                        check_added_monitors!(nodes[0], 1);
                        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
                },
                _ => panic!("Unexpected event"),
        }

        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);

        let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
        check_added_monitors!(nodes[1], 1);

        expect_pending_htlcs_forwardable!(nodes[1]);

        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
                        assert_eq!(payment_hash, our_payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
                        assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
                        assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
                                        assert_eq!(our_payment_secret, *payment_secret);
                                },
                                _ => panic!("expected PaymentPurpose::InvoicePayment")
                        }
                },
                _ => panic!("Unexpected event"),
        };

        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}

#[test]
fn test_monitor_update_fail_no_rebroadcast() {
        // Tests handling of a monitor update failure when no message rebroadcasting on
        // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
        // fuzz tests.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
                nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap();
                check_added_monitors!(nodes[0], 1);
        }

        let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
        let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
        check_added_monitors!(nodes[1], 1);

        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
        let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[1], 0);
        expect_pending_htlcs_forwardable!(nodes[1]);

        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
                Event::PaymentClaimable { payment_hash, .. } => {
                        assert_eq!(payment_hash, our_payment_hash);
                },
                _ => panic!("Unexpected event"),
        }

        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn test_monitor_update_raa_while_paused() {
        // Tests handling of an RAA while monitor updating has already been marked failed.
        // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        send_payment(&nodes[0], &[&nodes[1]], 5000000);
        let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
        {
                nodes[0].node.send_payment_with_route(&route, our_payment_hash_1,
                        RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
                check_added_monitors!(nodes[0], 1);
        }
        let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));

        let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000);
        {
                nodes[1].node.send_payment_with_route(&route, our_payment_hash_2,
                        RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap();
                check_added_monitors!(nodes[1], 1);
        }
        let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));

        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]);
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg);
        check_added_monitors!(nodes[1], 1);
        let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        check_added_monitors!(nodes[0], 1);

        let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
        nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
        check_added_monitors!(nodes[0], 0);

        let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
        check_added_monitors!(nodes[1], 1);
        let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1);
        check_added_monitors!(nodes[1], 1);
        let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

        nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
        check_added_monitors!(nodes[0], 1);
        let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

        nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
        check_added_monitors!(nodes[0], 1);
        expect_pending_htlcs_forwardable!(nodes[0]);
        expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);

        nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
        check_added_monitors!(nodes[1], 1);
        expect_pending_htlcs_forwardable!(nodes[1]);
        expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);

        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
}

848 fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
849         // Tests handling of a monitor update failure when processing an incoming RAA
850         let chanmon_cfgs = create_chanmon_cfgs(3);
851         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
852         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
853         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
854         create_announced_chan_between_nodes(&nodes, 0, 1);
855         let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
856
857         // Rebalance a bit so that we can send backwards from 2 to 1.
858         send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
859
860         // Route a first payment that we'll fail backwards
861         let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
862
863         // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
864         nodes[2].node.fail_htlc_backwards(&payment_hash_1);
865         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
866         check_added_monitors!(nodes[2], 1);
867
868         let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
869         assert!(updates.update_add_htlcs.is_empty());
870         assert!(updates.update_fulfill_htlcs.is_empty());
871         assert_eq!(updates.update_fail_htlcs.len(), 1);
872         assert!(updates.update_fail_malformed_htlcs.is_empty());
873         assert!(updates.update_fee.is_none());
874         nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
875
876         let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
877         check_added_monitors!(nodes[0], 0);
878
879         // While the second channel is AwaitingRAA, forward a second payment to get it into the
880         // holding cell.
881         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
882         {
883                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
884                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
885                 check_added_monitors!(nodes[0], 1);
886         }
887
888         let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
889         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
890         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
891
892         expect_pending_htlcs_forwardable!(nodes[1]);
893         check_added_monitors!(nodes[1], 0);
894         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
895
896         // Now fail monitor updating.
897         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
898         nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
899         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
900         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
901         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
902         check_added_monitors!(nodes[1], 1);
903
904         // Forward a third payment which will also be added to the holding cell, despite the channel
905         // being paused waiting a monitor update.
906         let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000);
907         {
908                 nodes[0].node.send_payment_with_route(&route, payment_hash_3,
909                         RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
910                 check_added_monitors!(nodes[0], 1);
911         }
912
913         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel
914         send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
915         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
916         commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
917         check_added_monitors!(nodes[1], 0);
918
919         // Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
920         // and not forwarded.
921         expect_pending_htlcs_forwardable!(nodes[1]);
922         check_added_monitors!(nodes[1], 0);
923         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
924
	let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
		// Try to route another payment backwards from 2 to make sure 1 holds off on responding
		let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
		nodes[2].node.send_payment_with_route(&route, payment_hash_4,
			RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
		check_added_monitors!(nodes[2], 1);

		send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
		nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
		check_added_monitors!(nodes[1], 1);
		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
		(Some(payment_preimage_4), Some(payment_hash_4))
	} else { (None, None) };

	// Restore monitor updating, ensuring we immediately get a fail-back update and an
	// update_add update.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
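	// Completing the monitor update releases everything nodes[1] held back: the fail-back of the
	// first payment toward nodes[0] and the held update_add toward nodes[2].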
	check_added_monitors!(nodes[1], 0);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	check_added_monitors!(nodes[1], 1);

	let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
	if test_ignore_second_cs {
		assert_eq!(events_3.len(), 3);
	} else {
		assert_eq!(events_3.len(), 2);
	}

	// Note that the ordering of events for different nodes is non-prescriptive, though the two
	// events that both go to nodes[2] must stay in the same order relative to each other.
	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3);
	let messages_a = match nodes_0_event {
		MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
			assert_eq!(node_id, nodes[0].node.get_our_node_id());
			assert!(updates.update_fulfill_htlcs.is_empty());
			assert_eq!(updates.update_fail_htlcs.len(), 1);
			assert!(updates.update_fail_malformed_htlcs.is_empty());
			assert!(updates.update_add_htlcs.is_empty());
			assert!(updates.update_fee.is_none());
			(updates.update_fail_htlcs.remove(0), updates.commitment_signed)
		},
		_ => panic!("Unexpected event type!"),
	};

	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
	let send_event_b = SendEvent::from_event(nodes_2_event);
	assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());

	let raa = if test_ignore_second_cs {
		let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3);
		match nodes_2_event {
			MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
				assert_eq!(node_id, nodes[2].node.get_our_node_id());
				Some(msg.clone())
			},
			_ => panic!("Unexpected event"),
		}
	} else { None };

	// Now deliver the new messages...

	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0);
	commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
	expect_payment_failed!(nodes[0], payment_hash_1, true);

	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]);
	let as_cs;
	if test_ignore_second_cs {
		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
		check_added_monitors!(nodes[2], 1);
		let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
		nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap());
		check_added_monitors!(nodes[2], 1);
		let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
		assert!(bs_cs.update_add_htlcs.is_empty());
		assert!(bs_cs.update_fail_htlcs.is_empty());
		assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
		assert!(bs_cs.update_fulfill_htlcs.is_empty());
		assert!(bs_cs.update_fee.is_none());

		nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
		check_added_monitors!(nodes[1], 1);
		as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed);
		check_added_monitors!(nodes[1], 1);
	} else {
		nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg);
		check_added_monitors!(nodes[2], 1);

		let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events();
		// As both messages are for nodes[1], they're in order.
		assert_eq!(bs_revoke_and_commit.len(), 2);
		match bs_revoke_and_commit[0] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &msg);
				check_added_monitors!(nodes[1], 1);
			},
			_ => panic!("Unexpected event"),
		}

		as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());

		match bs_revoke_and_commit[1] {
			MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fulfill_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed);
				check_added_monitors!(nodes[1], 1);
			},
			_ => panic!("Unexpected event"),
		}
	}

	assert_eq!(as_cs.update_add_htlcs.len(), 1);
	assert!(as_cs.update_fail_htlcs.is_empty());
	assert!(as_cs.update_fail_malformed_htlcs.is_empty());
	assert!(as_cs.update_fulfill_htlcs.is_empty());
	assert!(as_cs.update_fee.is_none());
	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]);
	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed);
	check_added_monitors!(nodes[2], 1);
	let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[2], 1);
	let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());

	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_second_raa);
	check_added_monitors!(nodes[2], 1);
	assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());

	expect_pending_htlcs_forwardable!(nodes[2]);

	let events_6 = nodes[2].node.get_and_clear_pending_events();
	assert_eq!(events_6.len(), 2);
	match events_6[0] {
		Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
		_ => panic!("Unexpected event"),
	};
	match events_6[1] {
		Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); },
		_ => panic!("Unexpected event"),
	};

	if test_ignore_second_cs {
		expect_pending_htlcs_forwardable!(nodes[1]);
		check_added_monitors!(nodes[1], 1);

		send_event = SendEvent::from_node(&nodes[1]);
		assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
		assert_eq!(send_event.msgs.len(), 1);
		nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);

		expect_pending_htlcs_forwardable!(nodes[0]);

		let events_9 = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events_9.len(), 1);
		match events_9[0] {
			Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
			_ => panic!("Unexpected event"),
		};
		claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
	}

	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
}

#[test]
fn test_monitor_update_fail_raa() {
	do_test_monitor_update_fail_raa(false);
	do_test_monitor_update_fail_raa(true);
}

#[test]
fn test_monitor_update_fail_reestablish() {
	// Simple test for message retransmission after monitor update failure on
	// channel_reestablish generating a monitor update (which comes from freeing holding cell
	// HTLCs).
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

	let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
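	// With nodes[0] disconnected, the fulfill nodes[1] owes on the 0<->1 channel sits in the
	// holding cell; the reestablish below will free it, generating the monitor update that is
	// now set to complete asynchronously.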
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
	assert_eq!(
		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
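	// (Bit 1, i.e. value 2, of channel_update's flags field is the channel-disabled bit as
	// defined in BOLT 7.)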

	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
	check_added_monitors!(nodes[1], 1);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
	assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	assert_eq!(
		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
	check_added_monitors!(nodes[1], 0);
	assert_eq!(
		get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id())
			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
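	// With the monitor update completed, nodes[1] can finally send the held fulfill (and its
	// commitment_signed) onward to nodes[0].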
	check_added_monitors!(nodes[1], 0);

	updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
	expect_payment_sent!(nodes[0], payment_preimage);
}

#[test]
fn raa_no_response_awaiting_raa_state() {
	// This is a rather convoluted test which ensures that if handling of an RAA does not happen
	// due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
	// in question (assuming it intends to respond with a CS after monitor updating is restored).
	// Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]);
	let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]);

	// Queue up two payments - one will be delivered right away, one immediately goes into the
	// holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
	// immediately after a CS. By failing the monitor update triggered by the CS (which requires
	// only an RAA response due to AwaitingRAA) we can deliver the RAA and require CS generation
	// during RAA handling while in monitor-update-failed state.
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 0);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);

	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
	// nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
	// then restore channel monitor updates.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
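	// (Each set_update_ret call queues a one-shot status: the first InProgress covers the
	// monitor update from the CS below, the second the update from the RAA.)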
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	// nodes[1] should be AwaitingRAA here!
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	// We send a third payment here, which is somewhat redundant, but the chanmon_fail_consistency
	// test required it to actually find the bug (by seeing out-of-sync commitment transaction
	// states) whereas here we can check for it explicitly.
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_3,
			RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
		check_added_monitors!(nodes[0], 0);
		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	}
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

	// Finally deliver the RAA to nodes[1] which results in a CS response to the last update
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
	let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);

	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);
	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
}

#[test]
fn claim_while_disconnected_monitor_update_fail() {
	// Test for claiming a payment while disconnected and then having the resulting
	// channel-update-generated monitor update fail. This isn't a particularly contrived case
	// for nodes with network instability.
	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
	// code introduced a regression in this test (specifically, this caught a removal of the
	// channel_reestablish handling ensuring the order was sensible given the messages used).
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	// Forward a payment for B to claim
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();

	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());

	// Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor
	// update.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Send a second payment from A to B, resulting in a commitment update that gets swallowed with
	// the monitor still failed
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	// Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
	// HTLC until we've channel_monitor_update'd and updated for the new commitment transaction.

	// Now un-fail the monitor, which will result in B sending its original commitment update,
	// receiving the commitment update from A, and the resulting commitment dances.
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);

	let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_msgs.len(), 2);

	match bs_msgs[0] {
		MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
			expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
			check_added_monitors!(nodes[0], 1);

			let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
			check_added_monitors!(nodes[1], 1);
		},
		_ => panic!("Unexpected event"),
	}

	match bs_msgs[1] {
		MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
			nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg);
			check_added_monitors!(nodes[0], 1);
		},
		_ => panic!("Unexpected event"),
	}

	let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());

	let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed);
	check_added_monitors!(nodes[0], 1);
	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed);
	check_added_monitors!(nodes[1], 1);
	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn monitor_failed_no_reestablish_response() {
	// Test for receiving a channel_reestablish after a monitor update failure resulted in no
	// response to a commitment_signed.
	// Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
	// debug_assert!() failure in channel_reestablish handling.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
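	// Mark announcement_signatures as already exchanged on both ends so the reconnection below
	// produces only channel_update messages rather than announcement_signatures.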
	{
		let mut node_0_per_peer_lock;
		let mut node_0_peer_state_lock;
		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
	}
	{
		let mut node_1_per_peer_lock;
		let mut node_1_peer_state_lock;
		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
	}

	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
	// on receipt).
	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
	// is still failing to update monitors.
	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();

	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
	let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
	check_added_monitors!(nodes[1], 0);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
}

#[test]
fn first_message_on_recv_ordering() {
	// Test that if the initial generator of a monitor-update-frozen state doesn't generate
	// messages, we're willing to flip the order of response messages if necessary in response
	// to a commitment_signed which needs to send an RAA first.
	// At a high level, our goal is to fail monitor updating in response to an RAA which needs no
	// response and then handle a CS while in the failed state, requiring an RAA followed by a CS
	// response. To do this, we start routing two payments, with the final RAA for the first being
	// delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
	// have no pending response but will want to send an RAA/CS (with the updates for the second
	// payment applied).
	// Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

	// Route the first payment outbound, holding the last RAA for B until we are set up so that we
	// can deliver it and fail the monitor update.
	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_1,
			RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

	// Route the second payment, generating an update_add_htlc/commitment_signed
	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
	{
		nodes[0].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[0], 1);
	}
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

	// Deliver the final RAA for the first payment, which does not require a response. RAAs
	// generally require a commitment_signed, so the fact that we're expecting an opposite response
	// to the next message also tests resetting the delivery order.
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
	// RAA/CS response, which should be generated when we call channel_monitor_update (with the
	// appropriate HTLC acceptance).
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
	check_added_monitors!(nodes[1], 1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
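	// Completing the monitor update releases both the forwardable event for the first payment
	// and the RAA/CS response to the second payment's commitment_signed.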
	check_added_monitors!(nodes[1], 0);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000);

	let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1);
	check_added_monitors!(nodes[0], 1);

	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
	check_added_monitors!(nodes[1], 1);

	expect_pending_htlcs_forwardable!(nodes[1]);
	expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}

#[test]
fn test_monitor_update_fail_claim() {
	// Basic test for monitor update failures when processing claim_funds calls.
	// We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
	// update to claim the payment. We then send two payments C->B->A, which are held at B.
	// Finally, we restore the channel monitor updating and claim the payment on B, forwarding
	// the payments from C onwards to A.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);
	// Rebalance a bit so that we can send backwards from C to A.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.claim_funds(payment_preimage_1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 1);

	// Note that at this point there is a pending commitment transaction update for A being held by
	// B. Even when we go to send the payment from C through B to A, B will not update this
	// already-signed commitment transaction and will instead wait for it to resolve before
	// forwarding the payment onwards.

	let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000);
	{
		nodes[2].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
	// paused, so forward shouldn't succeed until we call channel_monitor_updated().
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
	expect_pending_htlcs_forwardable_ignore!(nodes[1]);

	let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]);
	nodes[2].node.send_payment_with_route(&route, payment_hash_3,
		RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap();
	check_added_monitors!(nodes[2], 1);

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	let events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);

	// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
	let channel_id = chan_1.2;
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
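	// Completing the claim's monitor update surfaces the PaymentClaimed event and lets B send
	// the fulfill for payment 1 onward to A.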
	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
	check_added_monitors!(nodes[1], 0);

	let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
	expect_payment_sent!(nodes[0], payment_preimage_1);

	// Process the pending payment forwards; note that both were batched into one commitment update.
	nodes[1].node.process_pending_htlc_forwards();
	check_added_monitors!(nodes[1], 1);
	let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
	commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
	expect_pending_htlcs_forwardable!(nodes[0]);

	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id, .. } => {
			assert_eq!(payment_hash_2, *payment_hash);
			assert_eq!(1_000_000, amount_msat);
			assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			assert_eq!(via_user_channel_id, Some(42));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_2, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
	match events[1] {
		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, .. } => {
			assert_eq!(payment_hash_3, *payment_hash);
			assert_eq!(1_000_000, amount_msat);
			assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
			assert_eq!(via_channel_id, Some(channel_id));
			match &purpose {
				PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
					assert!(payment_preimage.is_none());
					assert_eq!(payment_secret_3, *payment_secret);
				},
				_ => panic!("expected PaymentPurpose::InvoicePayment")
			}
		},
		_ => panic!("Unexpected event"),
	}
}

#[test]
fn test_monitor_update_on_pending_forwards() {
	// Basic test for monitor update failures when processing pending HTLC fail/add forwards.
	// We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
	// The payment from A to C will be failed by C and pending a back-fail to A, while the payment
	// from C to A will be pending a forward to A.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Rebalance a bit so that we can send backwards from C to A.
	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);

	let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
	nodes[2].node.fail_htlc_backwards(&payment_hash_1);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
	check_added_monitors!(nodes[2], 1);

	let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000);
	{
		nodes[2].node.send_payment_with_route(&route, payment_hash_2,
			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
		check_added_monitors!(nodes[2], 1);
	}

	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
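	// Fail the monitor update generated when nodes[1] processes its pending HTLCs (the back-fail
	// of payment 1 and the forward of payment 2), holding both until the update completes.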
1790         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
1791         check_added_monitors!(nodes[1], 1);
1792
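	// Complete the pending monitor update using the test-harness pattern seen throughout
	// this file: queue a Completed status, look up the latest update id the
	// TestChainMonitor recorded for this channel, and tell the ChainMonitor it has been
	// persisted, which releases the messages held while the update was in progress.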
1793         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1794         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
1795         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1796         check_added_monitors!(nodes[1], 0);
1797
1798         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1799         nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
1800         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]);
1801         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
1802
1803         let events = nodes[0].node.get_and_clear_pending_events();
1804         assert_eq!(events.len(), 3);
1805         if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] {
1806                 assert_eq!(payment_hash, payment_hash_1);
1807                 assert!(payment_failed_permanently);
1808         } else { panic!("Unexpected event!"); }
1809         match events[2] {
1810                 Event::PaymentFailed { payment_hash, .. } => {
1811                         assert_eq!(payment_hash, payment_hash_1);
1812                 },
1813                 _ => panic!("Unexpected event"),
1814         }
1815         match events[0] {
1816                 Event::PendingHTLCsForwardable { .. } => { },
1817                 _ => panic!("Unexpected event"),
1818         };
1819         nodes[0].node.process_pending_htlc_forwards();
1820         expect_payment_claimable!(nodes[0], payment_hash_2, payment_secret_2, 1000000);
1821
1822         claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
1823 }
1824
1825 #[test]
1826 fn monitor_update_claim_fail_no_response() {
1827         // Test for claim_funds resulting in both a monitor update failure and no message response (due
1828	// to the channel being AwaitingRAA).
1829	// Backported from the chanmon_fail_consistency fuzz tests, as an unmerged version of the
1830	// handling code was broken.
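	// (AwaitingRAA: nodes[1] has an outstanding commitment_signed and may not send further
	// commitment updates until it receives nodes[0]'s revoke_and_ack, so the claim below
	// yields a monitor update but no immediate message response.)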
1831         let chanmon_cfgs = create_chanmon_cfgs(2);
1832         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1833         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1834         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1835         let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
1836
1837         // Forward a payment for B to claim
1838         let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
1839
1840         // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
1841         let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
1842         {
1843                 nodes[0].node.send_payment_with_route(&route, payment_hash_2,
1844                         RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
1845                 check_added_monitors!(nodes[0], 1);
1846         }
1847
1848         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
1849         assert_eq!(events.len(), 1);
1850         let payment_event = SendEvent::from_event(events.pop().unwrap());
1851         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
1852         let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
1853
1854         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1855         nodes[1].node.claim_funds(payment_preimage_1);
1856         check_added_monitors!(nodes[1], 1);
1857
1858         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1859
1860         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1861         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1862         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1863         expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
1864         check_added_monitors!(nodes[1], 0);
1865         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1866
1867         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
1868         check_added_monitors!(nodes[1], 1);
1869         expect_pending_htlcs_forwardable!(nodes[1]);
1870         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
1871
1872         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
1873         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
1874         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
1875         expect_payment_sent!(nodes[0], payment_preimage_1);
1876
1877         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
1878 }
1879
1880 // restore_b_before_conf has no meaning if !confirm_a_first
1881 // restore_b_before_lock has no meaning if confirm_a_first
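// The four meaningful combinations are exercised by during_funding_monitor_fail() below.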
1882 fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) {
1883         // Test that if the monitor update generated by funding_transaction_generated fails we continue
1884         // the channel setup happily after the update is restored.
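	// Funding flow exercised below: open_channel -> accept_channel -> funding_created ->
	// funding_signed, with the in-progress persistence status injected first on nodes[1]
	// (at funding_created) and then on nodes[0] (at funding_signed).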
1885         let chanmon_cfgs = create_chanmon_cfgs(2);
1886         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1887         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1888         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1889
1890         nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
1891         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
1892         nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
1893
1894         let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
1895
1896         nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
1897         check_added_monitors!(nodes[0], 0);
1898
1899         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1900         let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
1901         let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
1902         nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
1903         check_added_monitors!(nodes[1], 1);
1904
1905         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
1906         nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
1907         check_added_monitors!(nodes[0], 1);
1908         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1909         assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
1910         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1911         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1912         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1913         check_added_monitors!(nodes[0], 0);
1914         expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
1915
1916         let events = nodes[0].node.get_and_clear_pending_events();
1917         assert_eq!(events.len(), 0);
1918         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
1919         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
1920
1921         if confirm_a_first {
1922                 confirm_transaction(&nodes[0], &funding_tx);
1923                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1924                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1925                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1926         } else {
1927                 assert!(!restore_b_before_conf);
1928                 confirm_transaction(&nodes[1], &funding_tx);
1929                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1930         }
1931
1932         // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
1933         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
1934         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
1935         let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
1936         reconnect_args.send_channel_ready.1 = confirm_a_first;
1937         reconnect_nodes(reconnect_args);
1938
1939         // But we want to re-emit ChannelPending
1940         expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
1941         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
1942         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1943
1944         if !restore_b_before_conf {
1945                 confirm_transaction(&nodes[1], &funding_tx);
1946                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1947                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1948         }
1949         if !confirm_a_first && !restore_b_before_lock {
1950                 confirm_transaction(&nodes[0], &funding_tx);
1951                 nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
1952                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1953                 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1954         }
1955
1956         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
1957         let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
1958         nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
1959         check_added_monitors!(nodes[1], 0);
1960
1961         let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
1962                 if !restore_b_before_lock {
1963                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1964                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1965                 } else {
1966                         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()));
1967                         confirm_transaction(&nodes[0], &funding_tx);
1968                         let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
1969                         (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready))
1970                 }
1971         } else {
1972                 if restore_b_before_conf {
1973                         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
1974                         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
1975                         confirm_transaction(&nodes[1], &funding_tx);
1976                 }
1977                 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
1978                 (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready))
1979         };
1980         for node in nodes.iter() {
1981                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
1982                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
1983                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
1984         }
1985
1986         if !restore_b_before_lock {
1987                 expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
1988         } else {
1989                 expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
1990         }
1991
1992
1993         send_payment(&nodes[0], &[&nodes[1]], 8000000);
1994         close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
1995         check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
1996         check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
1997 }
1998
1999 #[test]
2000 fn during_funding_monitor_fail() {
2001         do_during_funding_monitor_fail(true, true, false);
2002         do_during_funding_monitor_fail(true, false, false);
2003         do_during_funding_monitor_fail(false, false, false);
2004         do_during_funding_monitor_fail(false, false, true);
2005 }
2006
2007 #[test]
2008 fn test_path_paused_mpp() {
2009         // Simple test of sending a multi-part payment where one path is currently blocked awaiting
2010         // monitor update
2011         let chanmon_cfgs = create_chanmon_cfgs(4);
2012         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
2013         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
2014         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
2015
2016         let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
2017         let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
2018         let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
2019         let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
2020
2021         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
2022
2023         // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
2024         let path = route.paths[0].clone();
2025         route.paths.push(path);
2026         route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
2027         route.paths[0].hops[0].short_channel_id = chan_1_id;
2028         route.paths[0].hops[1].short_channel_id = chan_3_id;
2029         route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
2030         route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id;
2031         route.paths[1].hops[1].short_channel_id = chan_4_id;
2032
2033         // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
2034         // (for the path 0 -> 2 -> 3) fails.
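	// (TestPersister, in test_utils, keeps a FIFO queue of statuses -- roughly
	//     fn set_update_ret(&self, ret: ChannelMonitorUpdateStatus) {
	//         self.update_rets.lock().unwrap().push_back(ret);
	//     }
	// -- returning Completed once the queue is empty, so the two calls below apply to this
	// payment's two monitor updates in order.)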
2035         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2036         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2037
2038         // Now check that we get the right return value, indicating that the first path succeeded but
2039         // the second got a MonitorUpdateInProgress err. This implies
2040         // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry.
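	// (results is ordered like route.paths, so results[0] is the 0 -> 1 -> 3 path and
	// results[1] the 0 -> 2 -> 3 path.)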
2041         if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route(
2042                 &route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
2043         ) {
2044                 assert_eq!(results.len(), 2);
2045                 if let Ok(()) = results[0] {} else { panic!(); }
2046                 if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); }
2047         } else { panic!(); }
2048         check_added_monitors!(nodes[0], 2);
2049         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2050
2051         // Pass the first HTLC of the payment along to nodes[3].
2052         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2053         assert_eq!(events.len(), 1);
2054         pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
2055
2056         // And check that, after we successfully update the monitor for chan_2 we can pass the second
2057         // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
2058         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
2059         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2060         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2061         assert_eq!(events.len(), 1);
2062         pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
2063
2064         claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
2065 }
2066
2067 #[test]
2068 fn test_pending_update_fee_ack_on_reconnect() {
2069         // In early versions of our automated fee update patch, nodes did not correctly use the
2070         // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an
2071         // undelivered commitment_signed.
2072         //
2073         // B sends A new HTLC + CS, not delivered
2074         // A sends B update_fee + CS
2075         // B receives the CS and sends RAA, previously causing B to lock in the new feerate
2076         // reconnect
2077         // B resends initial CS, using the original fee
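	// (CS = commitment_signed, RAA = revoke_and_ack.)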
2078
2079         let chanmon_cfgs = create_chanmon_cfgs(2);
2080         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2081         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2082         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2083
2084         create_announced_chan_between_nodes(&nodes, 0, 1);
2085         send_payment(&nodes[0], &[&nodes[1]], 100_000_00);
2086
2087         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000);
2088         nodes[1].node.send_payment_with_route(&route, payment_hash,
2089                 RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
2090         check_added_monitors!(nodes[1], 1);
2091         let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2092         // bs_initial_send_msgs are not delivered until they are re-generated after reconnect
2093
2094         {
2095                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2096                 *feerate_lock *= 2;
2097         }
2098         nodes[0].node.timer_tick_occurred();
2099         check_added_monitors!(nodes[0], 1);
2100         let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2101         assert!(as_update_fee_msgs.update_fee.is_some());
2102
2103         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap());
2104         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed);
2105         check_added_monitors!(nodes[1], 1);
2106         let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2107         // bs_first_raa is not delivered until it is re-generated after reconnect
2108
2109         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2110         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2111
2112         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2113                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2114         }, true).unwrap();
2115         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2116         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2117                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2118         }, false).unwrap();
2119         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2120
2121         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2122         let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events();
2123         assert_eq!(bs_resend_msgs.len(), 3);
2124         if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] {
2125                 assert_eq!(*updates, bs_initial_send_msgs);
2126         } else { panic!(); }
2127         if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] {
2128                 assert_eq!(*msg, bs_first_raa);
2129         } else { panic!(); }
2130         if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); }
2131
2132         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2133         get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
2134
2135         nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]);
2136         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed);
2137         check_added_monitors!(nodes[0], 1);
2138         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2139         check_added_monitors!(nodes[1], 1);
2140         let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed;
2141
2142         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2143         check_added_monitors!(nodes[0], 1);
2144         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed);
2145         check_added_monitors!(nodes[1], 1);
2146         let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2147
2148         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs);
2149         check_added_monitors!(nodes[0], 1);
2150         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
2151         check_added_monitors!(nodes[0], 1);
2152
2153         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2154         check_added_monitors!(nodes[1], 1);
2155
2156         expect_pending_htlcs_forwardable!(nodes[0]);
2157         expect_payment_claimable!(nodes[0], payment_hash, payment_secret, 1_000_000);
2158
2159         claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
2160 }
2161
2162 #[test]
2163 fn test_fail_htlc_on_broadcast_after_claim() {
2164         // In an earlier version of 7e78fa660cec8a73286c94c1073ee588140e7a01 we'd also fail the inbound
2165	// channel backwards if we received an HTLC failure after an HTLC fulfillment. Here we test a
2166         // specific case of that by having the HTLC failure come from the ChannelMonitor after a dust
2167         // HTLC was not included in a confirmed commitment transaction.
2168         //
2169         // We first forward a payment, then claim it with an update_fulfill_htlc message, closing the
2170         // channel immediately before commitment occurs. After the commitment transaction reaches
2171	// ANTI_REORG_DELAY confirmations, we will try to fail the HTLC which was already fulfilled.
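	// (The HTLC here is dust -- 2000 msat -- so it has no output in the confirmed
	// commitment transaction; once that transaction is ANTI_REORG_DELAY deep the
	// ChannelMonitor reports the HTLC as failed.)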
2172         let chanmon_cfgs = create_chanmon_cfgs(3);
2173         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2174         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2175         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2176
2177         create_announced_chan_between_nodes(&nodes, 0, 1);
2178         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2179
2180         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
2181
2182         let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
2183         assert_eq!(bs_txn.len(), 1);
2184
2185         nodes[2].node.claim_funds(payment_preimage);
2186         check_added_monitors!(nodes[2], 1);
2187         expect_payment_claimed!(nodes[2], payment_hash, 2000);
2188
2189         let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2190         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
2191         let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2192         check_added_monitors!(nodes[1], 1);
2193         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2194
2195         mine_transaction(&nodes[1], &bs_txn[0]);
2196         check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
2197         check_closed_broadcast!(nodes[1], true);
2198         connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
2199         check_added_monitors!(nodes[1], 1);
2200         expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2201
2202         nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
2203         expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2204         commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
2205         expect_payment_path_successful!(nodes[0]);
2206 }
2207
2208 fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
2209         // In early versions we did not handle resending of update_fee on reconnect correctly. The
2210         // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases
2211         // explicitly here.
2212         let chanmon_cfgs = create_chanmon_cfgs(2);
2213         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2214         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2215         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2216
2217         create_announced_chan_between_nodes(&nodes, 0, 1);
2218         send_payment(&nodes[0], &[&nodes[1]], 1000);
2219
2220         {
2221                 let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2222                 *feerate_lock += 20;
2223         }
2224         nodes[0].node.timer_tick_occurred();
2225         check_added_monitors!(nodes[0], 1);
2226         let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2227         assert!(update_msgs.update_fee.is_some());
2228         if deliver_update {
2229                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2230         }
2231
2232         if parallel_updates {
2233                 {
2234                         let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
2235                         *feerate_lock += 20;
2236                 }
2237                 nodes[0].node.timer_tick_occurred();
2238                 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2239         }
2240
2241         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2242         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2243
2244         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2245                 features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2246         }, true).unwrap();
2247         let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
2248         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2249                 features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2250         }, false).unwrap();
2251         let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
2252
2253         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
2254         get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
2255         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2256
2257         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg);
2258         let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
2259         assert_eq!(as_reconnect_msgs.len(), 2);
2260         if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
2261         let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
2262                 { updates } else { panic!(); };
2263         assert!(update_msgs.update_fee.is_some());
2264         nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
2265         if parallel_updates {
2266                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
2267                 check_added_monitors!(nodes[1], 1);
2268                 let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2269                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
2270                 check_added_monitors!(nodes[0], 1);
2271                 let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
2272
2273                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs);
2274                 check_added_monitors!(nodes[0], 1);
2275                 let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2276
2277                 nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
2278                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
2279                 check_added_monitors!(nodes[1], 1);
2280                 let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
2281
2282                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
2283                 let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2284                 check_added_monitors!(nodes[1], 1);
2285
2286                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
2287                 check_added_monitors!(nodes[0], 1);
2288
2289                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
2290                 check_added_monitors!(nodes[0], 1);
2291                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2292
2293                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
2294                 check_added_monitors!(nodes[1], 1);
2295         } else {
2296                 commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false);
2297         }
2298
2299         send_payment(&nodes[0], &[&nodes[1]], 1000);
2300 }
2301 #[test]
2302 fn update_fee_resend_test() {
2303         do_update_fee_resend_test(false, false);
2304         do_update_fee_resend_test(true, false);
2305         do_update_fee_resend_test(false, true);
2306         do_update_fee_resend_test(true, true);
2307 }
2308
2309 fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
2310         // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
2311         // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
2312         // that behavior was both somewhat unexpected and also broken (there was a debug assertion
2313         // which failed in such a case).
2314         let chanmon_cfgs = create_chanmon_cfgs(2);
2315         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2316         let persister;
2317         let new_chain_monitor;
2318         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
2319         let nodes_0_deserialized;
2320         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2321
2322         let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
2323         let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
2324         let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
2325
2326         // Do a really complicated dance to get an HTLC into the holding cell, with
2327         // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
2328	// attempts to send an HTLC while MonitorUpdateInProgress was set were immediately
2329         // failed-backwards. Thus, the only way to get an AddHTLC into the holding cell is to add it
2330         // while AwaitingRemoteRevoke is set but MonitorUpdateInProgress is unset, and then swap the
2331         // flags.
2332         //
2333         // We do this by:
2334         //  a) routing a payment from node B to node A,
2335         //  b) sending a payment from node A to node B without delivering any of the generated messages,
2336         //     putting node A in AwaitingRemoteRevoke,
2337         //  c) sending a second payment from node A to node B, which is immediately placed in the
2338         //     holding cell,
2339         //  d) claiming the first payment from B, allowing us to fail the monitor update which occurs
2340         //     when we try to persist the payment preimage,
2341         //  e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
2342         //     clearing AwaitingRemoteRevoke on node A.
2343         //
2344         // Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
2345         // (c) will not be freed from the holding cell.
2346         let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
2347
2348         nodes[0].node.send_payment_with_route(&route, payment_hash_1,
2349                 RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
2350         check_added_monitors!(nodes[0], 1);
2351         let send = SendEvent::from_node(&nodes[0]);
2352         assert_eq!(send.msgs.len(), 1);
2353
2354         nodes[0].node.send_payment_with_route(&route, payment_hash_2,
2355                 RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
2356         check_added_monitors!(nodes[0], 0);
2357
2358         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
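	// Queue two InProgress statuses: one for the monitor update generated by claim_funds
	// below, and one for the update generated when nodes[0] later handles nodes[1]'s
	// revoke_and_ack.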
2359         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2360         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2361         nodes[0].node.claim_funds(payment_preimage_0);
2362         check_added_monitors!(nodes[0], 1);
2363
2364         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
2365         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
2366         check_added_monitors!(nodes[1], 1);
2367
2368         let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2369
2370         nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
2371         check_added_monitors!(nodes[0], 1);
2372
2373         if disconnect {
2374                 // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
2375                 // disconnect the peers. Note that the fuzzer originally found this issue because
2376                 // deserializing a ChannelManager in this state causes an assertion failure.
2377                 if reload_a {
2378                         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
2379                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2380                         persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2381                 } else {
2382                         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2383                 }
2384                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
2385
2386                 // Now reconnect the two
2387                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
2388                         features: nodes[1].node.init_features(), networks: None, remote_network_address: None
2389                 }, true).unwrap();
2390                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
2391                 assert_eq!(reestablish_1.len(), 1);
2392                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
2393                         features: nodes[0].node.init_features(), networks: None, remote_network_address: None
2394                 }, false).unwrap();
2395                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
2396                 assert_eq!(reestablish_2.len(), 1);
2397
2398                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
2399                 let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
2400                 check_added_monitors!(nodes[1], 0);
2401
2402                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
2403                 let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
2404
2405                 assert!(resp_0.0.is_none());
2406                 assert!(resp_0.1.is_none());
2407                 assert!(resp_0.2.is_none());
2408                 assert!(resp_1.0.is_none());
2409                 assert!(resp_1.1.is_none());
2410
2411                 // Check that the freshly-generated cs is equal to the original (which we will deliver in a
2412                 // moment).
2413                 if let Some(pending_cs) = resp_1.2 {
2414                         assert!(pending_cs.update_add_htlcs.is_empty());
2415                         assert!(pending_cs.update_fail_htlcs.is_empty());
2416                         assert!(pending_cs.update_fulfill_htlcs.is_empty());
2417                         assert_eq!(pending_cs.commitment_signed, cs);
2418                 } else { panic!(); }
2419
2420                 if reload_a {
2421                         // The two pending monitor updates were replayed (but are still pending).
2422                         check_added_monitors(&nodes[0], 2);
2423                 } else {
2424			// There should be no new monitor updates, as we are still waiting on the pending in-progress one.
2425                         check_added_monitors(&nodes[0], 0);
2426                 }
2427                 check_added_monitors(&nodes[1], 0);
2428         }
2429
2430         // If we finish updating the monitor, we should free the holding cell right away (this did
2431         // not occur prior to #756).
2432         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2433         let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
2434         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id);
2435         expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
2436
2437         // New outbound messages should be generated immediately upon a call to
2438         // get_and_clear_pending_msg_events (but not before).
2439         check_added_monitors!(nodes[0], 0);
2440         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
2441         check_added_monitors!(nodes[0], 1);
2442         assert_eq!(events.len(), 1);
2443
2444         // Deliver the pending in-flight CS
2445         nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
2446         check_added_monitors!(nodes[0], 1);
2447
2448         let commitment_msg = match events.pop().unwrap() {
2449                 MessageSendEvent::UpdateHTLCs { node_id, updates } => {
2450                         assert_eq!(node_id, nodes[1].node.get_our_node_id());
2451                         assert!(updates.update_fail_htlcs.is_empty());
2452                         assert!(updates.update_fail_malformed_htlcs.is_empty());
2453                         assert!(updates.update_fee.is_none());
2454                         assert_eq!(updates.update_fulfill_htlcs.len(), 1);
2455                         nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
2456                         expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
2457                         assert_eq!(updates.update_add_htlcs.len(), 1);
2458                         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
2459                         updates.commitment_signed
2460                 },
2461                 _ => panic!("Unexpected event type!"),
2462         };
2463
2464         nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
2465         check_added_monitors!(nodes[1], 1);
2466
2467         let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
2468         nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
2469         expect_pending_htlcs_forwardable!(nodes[1]);
2470         expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
2471         check_added_monitors!(nodes[1], 1);
2472
2473         commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);
2474
2475         let events = nodes[1].node.get_and_clear_pending_events();
2476         assert_eq!(events.len(), 2);
2477         match events[0] {
2478                 Event::PendingHTLCsForwardable { .. } => { },
2479                 _ => panic!("Unexpected event"),
2480         };
2481         match events[1] {
2482                 Event::PaymentPathSuccessful { .. } => { },
2483                 _ => panic!("Unexpected event"),
2484         };
2485
2486         nodes[1].node.process_pending_htlc_forwards();
2487         expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 100000);
2488
2489         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
2490         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
2491 }
2492 #[test]
2493 fn channel_holding_cell_serialize() {
2494         do_channel_holding_cell_serialize(true, true);
2495         do_channel_holding_cell_serialize(true, false);
2496         do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
2497 }
2498
2499 #[derive(PartialEq)]
2500 enum HTLCStatusAtDupClaim {
2501         Received,
2502         HoldingCell,
2503         Cleared,
2504 }
2505 fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
2506         // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
2507         // along the payment path before waiting for a full commitment_signed dance. This is great, but
2508         // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
2509	// reconnects, and then has to re-send its update_fulfill_htlc message.
2510         // In previous code, we didn't handle the double-claim correctly, spuriously closing the
2511         // channel on which the inbound HTLC was received.
2512         let chanmon_cfgs = create_chanmon_cfgs(3);
2513         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
2514         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
2515         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
2516
2517         create_announced_chan_between_nodes(&nodes, 0, 1);
2518         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
2519
2520         let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
2521
2522         let mut as_raa = None;
2523         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2524                 // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
2525                 // awaiting a remote revoke_and_ack from nodes[0].
2526                 let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
2527                 nodes[0].node.send_payment_with_route(&route, second_payment_hash,
2528                         RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
2529                 check_added_monitors!(nodes[0], 1);
2530
2531                 let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
2532                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
2533                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
2534                 check_added_monitors!(nodes[1], 1);
2535
2536                 let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
2537                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
2538                 check_added_monitors!(nodes[0], 1);
2539                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
2540                 check_added_monitors!(nodes[0], 1);
2541
2542                 as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
2543         }
2544
2545         let fulfill_msg = msgs::UpdateFulfillHTLC {
2546                 channel_id: chan_id_2,
2547                 htlc_id: 0,
2548                 payment_preimage,
2549         };
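	// Build the fulfill message by hand so it can be delivered to nodes[1] in both branches
	// below -- including the one where nodes[2] fails the HTLC instead of claiming it --
	// simulating a claim re-sent from before a disconnect.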
2550         if second_fails {
2551                 nodes[2].node.fail_htlc_backwards(&payment_hash);
2552                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]);
2553                 check_added_monitors!(nodes[2], 1);
2554                 get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2555         } else {
2556                 nodes[2].node.claim_funds(payment_preimage);
2557                 check_added_monitors!(nodes[2], 1);
2558                 expect_payment_claimed!(nodes[2], payment_hash, 100_000);
2559
2560                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
2561                 assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
2562                 // Check that the message we're about to deliver matches the one generated:
2563                 assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
2564         }
2565         nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
2566         expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
2567         check_added_monitors!(nodes[1], 1);
2568
2569         let mut bs_updates = None;
2570         if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
2571                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2572                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2573                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2574                 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2575                 if htlc_status == HTLCStatusAtDupClaim::Cleared {
2576                         commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2577                         expect_payment_path_successful!(nodes[0]);
2578                 }
2579         } else {
2580                 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
2581         }
2582
2583         nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
2584         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
2585
2586         if second_fails {
2587                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2588                 reconnect_args.pending_htlc_fails.0 = 1;
2589                 reconnect_nodes(reconnect_args);
2590                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
2591         } else {
2592                 let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
2593                 reconnect_args.pending_htlc_claims.0 = 1;
2594                 reconnect_nodes(reconnect_args);
2595         }
2596
2597         if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
2598                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
2599                 check_added_monitors!(nodes[1], 1);
2600                 expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
2601
2602                 bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
2603                 assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
2604                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
2605                 expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
2606         }
2607         if htlc_status != HTLCStatusAtDupClaim::Cleared {
2608                 commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
2609                 expect_payment_path_successful!(nodes[0]);
2610         }
2611 }
2612
2613 #[test]
2614 fn test_reconnect_dup_htlc_claims() {
2615         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
2616         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
2617         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
2618         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
2619         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
2620         do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
2621 }
2622
2623 #[test]
2624 fn test_temporary_error_during_shutdown() {
2625         // Test that temporary failures when updating the monitor's shutdown script delay cooperative
2626         // close.
2627         let mut config = test_default_channel_config();
2628         config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;
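	// With upfront shutdown pubkeys disabled, the shutdown script is only committed to the
	// ChannelMonitor when the channel is being closed (via a ShutdownScript monitor
	// update), which is the update this test fails.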
2629
2630         let chanmon_cfgs = create_chanmon_cfgs(2);
2631         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
2632         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
2633         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
2634
2635         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
2636
2637         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2638         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
2639
2640         nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
2641         nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
2642         check_added_monitors!(nodes[1], 1);
2643
2644         nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
2645         check_added_monitors!(nodes[0], 1);
2646
2647         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
2648
2649         chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2650         chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
2651
2652         let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
2653         nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
2654         nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
2655
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);

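	// With both updates complete, the `closing_signed` exchange can finish and both nodes should
	// broadcast the same closing transaction.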
	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(none_b.is_none());
	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);

	assert_eq!(txn_a, txn_b);
	assert_eq!(txn_a.len(), 1);
	check_spends!(txn_a[0], funding_tx);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_permanent_error_during_sending_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when initiating a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());

	// We always send our `shutdown` message when initiating a shutdown, even if we immediately
	// force-close the channel thereafter.
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

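	// Two monitor updates are expected here: presumably the shutdown-script update that returned
	// a permanent failure, plus the update applied as part of the resulting force-close.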
	check_added_monitors!(nodes[0], 2);
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
		[nodes[1].node.get_our_node_id()], 100000);
}

#[test]
fn test_permanent_error_during_handling_shutdown() {
	// Test that permanent failures when updating the monitor's shutdown script result in a force
	// close when handling a cooperative close.
	let mut config = test_default_channel_config();
	config.channel_handshake_config.commit_upfront_shutdown_pubkey = false;

	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(config)]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::PermanentFailure);

	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);

	// We always send the `shutdown` response when receiving a shutdown, even if we immediately
	// close the channel thereafter.
	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 3);
	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

	check_added_monitors!(nodes[1], 2);
	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
		[nodes[0].node.get_our_node_id()], 100000);
}

#[test]
fn double_temp_error() {
	// Test that it's OK to have multiple `ChainMonitor::update_channel` calls fail in a row.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);

	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// `claim_funds` results in a ChannelMonitorUpdate.
	nodes[1].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[1], 1);
	let (funding_tx, latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();

	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
	// which had some asserts that prevented it from being called twice.
	nodes[1].node.claim_funds(payment_preimage_2);
	check_added_monitors!(nodes[1], 1);
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

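	// Complete the two pending updates in order. Completing only the first must not release any
	// messages, since the channel still has the second update in flight.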
	let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_1);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	check_added_monitors!(nodes[1], 0);
	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2);

	// Complete the first HTLC. Note that as a side-effect we handle the monitor update completions
	// and get both PaymentClaimed events at once.
	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 2);
	match events[0] {
		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
		_ => panic!("Unexpected Event: {:?}", events[0]),
	}
	match events[1] {
		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
		_ => panic!("Unexpected Event: {:?}", events[1]),
	}

	assert_eq!(msg_events.len(), 1);
	let (update_fulfill_1, commitment_signed_b1, node_id) = {
		match &msg_events[0] {
			&MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
				assert!(update_add_htlcs.is_empty());
				assert_eq!(update_fulfill_htlcs.len(), 1);
				assert!(update_fail_htlcs.is_empty());
				assert!(update_fail_malformed_htlcs.is_empty());
				assert!(update_fee.is_none());
				(update_fulfill_htlcs[0].clone(), commitment_signed.clone(), node_id.clone())
			},
			_ => panic!("Unexpected event"),
		}
	};
	assert_eq!(node_id, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
	check_added_monitors!(nodes[0], 0);
	expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
	check_added_monitors!(nodes[0], 1);
	nodes[0].node.process_pending_htlc_forwards();
	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[1], 0);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_a1);
	check_added_monitors!(nodes[1], 1);
	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed_a1);
	check_added_monitors!(nodes[1], 1);

	// Complete the second HTLC.
	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
		let events = nodes[1].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 2);
		(match &events[0] {
			MessageSendEvent::UpdateHTLCs { node_id, updates } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				assert!(updates.update_add_htlcs.is_empty());
				assert!(updates.update_fail_htlcs.is_empty());
				assert!(updates.update_fail_malformed_htlcs.is_empty());
				assert!(updates.update_fee.is_none());
				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
			},
			_ => panic!("Unexpected event"),
		},
		match events[1] {
			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
				(*msg).clone()
			},
			_ => panic!("Unexpected event"),
		})
	};
	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_b2);
	check_added_monitors!(nodes[0], 1);
	expect_payment_path_successful!(nodes[0]);

	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_2);
	check_added_monitors!(nodes[0], 0);
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false);
	expect_payment_sent!(nodes[0], payment_preimage_2);
}

fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
	// Test that if the monitor update generated when handling `funding_signed` is stored async
	// and we restart with the latest ChannelManager but the ChannelMonitor persistence never
	// completed, we happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister;
	let new_chain_monitor;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let nodes_0_deserialized;

	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

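	// nodes[1] replies with `funding_signed` (and, in the 0conf case, an immediate
	// `channel_ready`).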
	let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 });
	match &bs_signed_locked[0] {
		MessageSendEvent::SendFundingSigned { msg, .. } => {
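			// Store nodes[0]'s initial ChannelMonitor asynchronously; the persistence will
			// still be pending when we reload the node below.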
			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

			nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &msg);
			check_added_monitors!(nodes[0], 1);
		}
		_ => panic!("Unexpected event"),
	}
	if use_0conf {
		match &bs_signed_locked[1] {
			MessageSendEvent::SendChannelReady { msg, .. } => {
				nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &msg);
			}
			_ => panic!("Unexpected event"),
		}
	}

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());

	// nodes[0] is now waiting on the first ChannelMonitor persistence to complete in order to
	// broadcast the funding transaction. If nodes[0] restarts at this point with the
	// ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[0].chain_source.watched_txn.lock().unwrap().clear();
	nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
	assert!(nodes[0].node.list_channels().is_empty());
}

#[test]
fn test_outbound_reload_without_init_mon() {
	do_test_outbound_reload_without_init_mon(true);
	do_test_outbound_reload_without_init_mon(false);
}

fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: bool) {
	// Test that if the monitor update generated when handling `funding_created` is stored async
	// and we restart with the latest ChannelManager but the ChannelMonitor persistence never
	// completed, we happily drop the channel and move on.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);

	let persister;
	let new_chain_monitor;

	let mut chan_config = test_default_channel_config();
	chan_config.manually_accept_inbound_channels = true;
	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;

	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config), Some(chan_config)]);
	let nodes_1_deserialized;

	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			if use_0conf {
				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			} else {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0).unwrap();
			}
		},
		_ => panic!("Unexpected event"),
	};

	nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));

	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);

	nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
	check_added_monitors!(nodes[0], 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
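	// Have nodes[1]'s initial ChannelMonitor persistence be left pending when it handles
	// `funding_created`.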
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
	check_added_monitors!(nodes[1], 1);

	// nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the
	// initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
	// transaction is confirmed.
	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
	check_added_monitors!(nodes[0], 1);
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());

	let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	if lock_commitment {
		confirm_transaction(&nodes[0], &as_funding_tx[0]);
		confirm_transaction(&nodes[1], &as_funding_tx[0]);
	}
	if use_0conf || lock_commitment {
		let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
		nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_ready);
	}
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// nodes[1] is now waiting on the first ChannelMonitor persistence to complete in order to
	// move the channel to ready (or is waiting on the funding transaction to confirm). If nodes[1]
	// restarts at this point with the ChannelMonitor lost, we should simply discard the channel.

	// The test framework checks that watched_txn/outputs match the monitor set, which they will
	// not, so we have to clear them here.
	nodes[1].chain_source.watched_txn.lock().unwrap().clear();
	nodes[1].chain_source.watched_outputs.lock().unwrap().clear();

	reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
	assert!(nodes[1].node.list_channels().is_empty());
}

#[test]
fn test_inbound_reload_without_init_mon() {
	do_test_inbound_reload_without_init_mon(true, true);
	do_test_inbound_reload_without_init_mon(true, false);
	do_test_inbound_reload_without_init_mon(false, true);
	do_test_inbound_reload_without_init_mon(false, false);
}

#[test]
fn test_blocked_chan_preimage_release() {
	// Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to
	// be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 1, 2);

	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);

	// Tee up two payments in opposite directions across nodes[1]: one that it sent (to later
	// generate a PaymentSent event) and one that it forwards.
	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);

	// Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
	nodes[2].node.claim_funds(payment_preimage_1);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);

	let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
	check_added_monitors(&nodes[1], 0);
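	// nodes[1] now has a pending `PaymentSent` event to process; until it does, monitor updates
	// on its channel with nodes[2] (including the final RAA update) remain blocked.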

	// Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
	// claim an HTLC on its channel with nodes[2], but that channel is blocked on the above
	// `PaymentSent` event.
	nodes[0].node.claim_funds(payment_preimage_2);
	check_added_monitors(&nodes[0], 1);
	expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);

	let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Finish the CS dance between nodes[0] and nodes[1].
	do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
	check_added_monitors(&nodes[1], 0);

	let events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 3);
	if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }

	// The event processing should release the last RAA update.
	check_added_monitors(&nodes[1], 1);

	// When we fetch the next update the message getter will generate the next update for nodes[2],
	// generating a further monitor update.
	let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
	check_added_monitors(&nodes[1], 1);

	nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
	do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}